-rw-r--r--Documentation/ABI/stable/sysfs-driver-usb-usbtmc2
-rw-r--r--Documentation/ABI/testing/debugfs-olpc16
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-acpi20
-rw-r--r--Documentation/CodingStyle29
-rw-r--r--Documentation/acpi/apei/einj.txt8
-rw-r--r--Documentation/aoe/aoe.txt2
-rw-r--r--Documentation/aoe/autoload.sh4
-rw-r--r--Documentation/blockdev/floppy.txt2
-rw-r--r--Documentation/cpuidle/sysfs.txt5
-rw-r--r--Documentation/devicetree/bindings/mtd/arm-versatile.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-dataflash.txt3
-rw-r--r--Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/fsmc-nand.txt33
-rw-r--r--Documentation/devicetree/bindings/mtd/gpio-control-nand.txt3
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd-physmap.txt23
-rw-r--r--Documentation/devicetree/bindings/mtd/partition.txt38
-rw-r--r--Documentation/devicetree/bindings/mtd/spear_smi.txt31
-rw-r--r--Documentation/devicetree/bindings/power_supply/max17042_battery.txt18
-rw-r--r--Documentation/devicetree/usage-model.txt412
-rw-r--r--Documentation/dontdiff1
-rw-r--r--Documentation/fb/intel810.txt2
-rw-r--r--Documentation/fb/intelfb.txt2
-rw-r--r--Documentation/filesystems/files.txt4
-rw-r--r--Documentation/i2c/busses/scx200_acb2
-rw-r--r--Documentation/ide/ide.txt2
-rw-r--r--Documentation/input/input.txt4
-rw-r--r--Documentation/isdn/README.gigaset16
-rw-r--r--Documentation/kbuild/kconfig.txt8
-rw-r--r--Documentation/kernel-parameters.txt6
-rw-r--r--Documentation/laptops/sonypi.txt2
-rw-r--r--Documentation/mono.txt8
-rw-r--r--Documentation/networking/baycom.txt2
-rw-r--r--Documentation/networking/bonding.txt46
-rw-r--r--Documentation/networking/dl2k.txt11
-rw-r--r--Documentation/networking/e100.txt6
-rw-r--r--Documentation/networking/ipv6.txt6
-rw-r--r--Documentation/networking/ixgb.txt6
-rw-r--r--Documentation/networking/ltpc.txt2
-rw-r--r--Documentation/networking/vortex.txt6
-rw-r--r--Documentation/parport.txt13
-rw-r--r--Documentation/s390/3270.txt21
-rw-r--r--Documentation/scsi/aic79xx.txt2
-rw-r--r--Documentation/scsi/aic7xxx.txt2
-rw-r--r--Documentation/scsi/osst.txt2
-rw-r--r--Documentation/serial/computone.txt8
-rw-r--r--Documentation/serial/rocket.txt2
-rw-r--r--Documentation/serial/stallion.txt4
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt10
-rw-r--r--Documentation/sound/alsa/Audiophile-Usb.txt4
-rw-r--r--Documentation/sound/alsa/MIXART.txt6
-rw-r--r--Documentation/sound/alsa/OSS-Emulation.txt2
-rw-r--r--Documentation/sound/oss/AudioExcelDSP1610
-rw-r--r--Documentation/sound/oss/CMI83305
-rw-r--r--Documentation/sound/oss/Introduction10
-rw-r--r--Documentation/sound/oss/Opti8
-rw-r--r--Documentation/sound/oss/PAS164
-rw-r--r--Documentation/sound/oss/README.modules10
-rw-r--r--Documentation/sysrq.txt5
-rw-r--r--Documentation/usb/power-management.txt3
-rw-r--r--Documentation/video4linux/CQcam.txt14
-rw-r--r--Documentation/video4linux/Zoran2
-rw-r--r--Documentation/video4linux/bttv/Modules.conf2
-rw-r--r--Documentation/video4linux/meye.txt2
-rw-r--r--MAINTAINERS6
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/alpha/include/asm/posix_types.h113
-rw-r--r--arch/arm/Kconfig50
-rw-r--r--arch/arm/Kconfig.debug16
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/compressed/.gitignore2
-rw-r--r--arch/arm/boot/compressed/Makefile15
-rw-r--r--arch/arm/boot/compressed/decompress.c6
-rw-r--r--arch/arm/boot/compressed/piggy.xzkern.S6
-rw-r--r--arch/arm/common/Kconfig3
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/common/gic.c13
-rw-r--r--arch/arm/common/pl330.c1960
-rw-r--r--arch/arm/configs/integrator_defconfig8
-rw-r--r--arch/arm/include/asm/assembler.h2
-rw-r--r--arch/arm/include/asm/cpuidle.h29
-rw-r--r--arch/arm/include/asm/elf.h4
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h6
-rw-r--r--arch/arm/include/asm/hardware/iop_adma.h2
-rw-r--r--arch/arm/include/asm/hardware/it8152.h3
-rw-r--r--arch/arm/include/asm/hardware/pl330.h217
-rw-r--r--arch/arm/include/asm/io.h71
-rw-r--r--arch/arm/include/asm/irq.h8
-rw-r--r--arch/arm/include/asm/jump_label.h41
-rw-r--r--arch/arm/include/asm/mc146818rtc.h4
-rw-r--r--arch/arm/include/asm/memory.h2
-rw-r--r--arch/arm/include/asm/mmu_context.h29
-rw-r--r--arch/arm/include/asm/opcodes.h59
-rw-r--r--arch/arm/include/asm/page.h2
-rw-r--r--arch/arm/include/asm/perf_event.h1
-rw-r--r--arch/arm/include/asm/posix_types.h55
-rw-r--r--arch/arm/include/asm/processor.h1
-rw-r--r--arch/arm/include/asm/prom.h2
-rw-r--r--arch/arm/include/asm/tlbflush.h136
-rw-r--r--arch/arm/include/asm/traps.h2
-rw-r--r--arch/arm/kernel/Makefile16
-rw-r--r--arch/arm/kernel/cpuidle.c21
-rw-r--r--arch/arm/kernel/debug.S26
-rw-r--r--arch/arm/kernel/entry-armv.S1
-rw-r--r--arch/arm/kernel/ftrace.c100
-rw-r--r--arch/arm/kernel/head.S8
-rw-r--r--arch/arm/kernel/insn.c61
-rw-r--r--arch/arm/kernel/insn.h29
-rw-r--r--arch/arm/kernel/irq.c5
-rw-r--r--arch/arm/kernel/jump_label.c39
-rw-r--r--arch/arm/kernel/kprobes.c86
-rw-r--r--arch/arm/kernel/machine_kexec.c25
-rw-r--r--arch/arm/kernel/patch.c75
-rw-r--r--arch/arm/kernel/patch.h7
-rw-r--r--arch/arm/kernel/perf_event.c3
-rw-r--r--arch/arm/kernel/perf_event_v7.c145
-rw-r--r--arch/arm/kernel/process.c36
-rw-r--r--arch/arm/kernel/sched_clock.c18
-rw-r--r--arch/arm/kernel/setup.c1
-rw-r--r--arch/arm/kernel/signal.c24
-rw-r--r--arch/arm/kernel/smp.c17
-rw-r--r--arch/arm/kernel/time.c4
-rw-r--r--arch/arm/kernel/traps.c19
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c1
-rw-r--r--arch/arm/mach-at91/at91x40.c1
-rw-r--r--arch/arm/mach-at91/cpuidle.c59
-rw-r--r--arch/arm/mach-at91/include/mach/at_hdmac.h15
-rw-r--r--arch/arm/mach-at91/include/mach/io.h31
-rw-r--r--arch/arm/mach-at91/include/mach/uncompress.h1
-rw-r--r--arch/arm/mach-at91/setup.c1
-rw-r--r--arch/arm/mach-bcmring/include/mach/io.h33
-rw-r--r--arch/arm/mach-clps711x/edb7211-mm.c1
-rw-r--r--arch/arm/mach-clps711x/include/mach/io.h36
-rw-r--r--arch/arm/mach-clps711x/include/mach/uncompress.h1
-rw-r--r--arch/arm/mach-cns3xxx/core.c8
-rw-r--r--arch/arm/mach-cns3xxx/devices.c2
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/io.h17
-rw-r--r--arch/arm/mach-davinci/cpuidle.c83
-rw-r--r--arch/arm/mach-davinci/include/mach/entry-macro.S1
-rw-r--r--arch/arm/mach-davinci/include/mach/hardware.h6
-rw-r--r--arch/arm/mach-davinci/include/mach/io.h24
-rw-r--r--arch/arm/mach-davinci/include/mach/uncompress.h2
-rw-r--r--arch/arm/mach-davinci/time.c28
-rw-r--r--arch/arm/mach-dove/addr-map.c1
-rw-r--r--arch/arm/mach-dove/include/mach/io.h1
-rw-r--r--arch/arm/mach-ebsa110/core.c15
-rw-r--r--arch/arm/mach-ebsa110/include/mach/io.h9
-rw-r--r--arch/arm/mach-ep93xx/include/mach/io.h22
-rw-r--r--arch/arm/mach-exynos/include/mach/io.h26
-rw-r--r--arch/arm/mach-footbridge/include/mach/io.h13
-rw-r--r--arch/arm/mach-gemini/include/mach/io.h18
-rw-r--r--arch/arm/mach-h720x/common.c1
-rw-r--r--arch/arm/mach-h720x/include/mach/io.h22
-rw-r--r--arch/arm/mach-highbank/highbank.c1
-rw-r--r--arch/arm/mach-highbank/include/mach/io.h7
-rw-r--r--arch/arm/mach-highbank/include/mach/irqs.h6
-rw-r--r--arch/arm/mach-imx/Kconfig6
-rw-r--r--arch/arm/mach-imx/Makefile2
-rw-r--r--arch/arm/mach-imx/dma-v1.c845
-rw-r--r--arch/arm/mach-imx/include/mach/dma-v1.h103
-rw-r--r--arch/arm/mach-imx/mm-imx3.c11
-rw-r--r--arch/arm/mach-imx/mm-imx5.c1
-rw-r--r--arch/arm/mach-integrator/core.c3
-rw-r--r--arch/arm/mach-integrator/include/mach/io.h1
-rw-r--r--arch/arm/mach-integrator/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c10
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c3
-rw-r--r--arch/arm/mach-integrator/pci.c3
-rw-r--r--arch/arm/mach-integrator/pci_v3.c3
-rw-r--r--arch/arm/mach-iop13xx/include/mach/io.h13
-rw-r--r--arch/arm/mach-iop13xx/include/mach/iop13xx.h1
-rw-r--r--arch/arm/mach-iop13xx/io.c20
-rw-r--r--arch/arm/mach-iop13xx/iq81340mc.c1
-rw-r--r--arch/arm/mach-iop13xx/iq81340sc.c1
-rw-r--r--arch/arm/mach-iop13xx/pci.h6
-rw-r--r--arch/arm/mach-iop32x/include/mach/io.h1
-rw-r--r--arch/arm/mach-iop33x/include/mach/io.h1
-rw-r--r--arch/arm/mach-ixp2000/include/mach/io.h1
-rw-r--r--arch/arm/mach-ixp23xx/core.c1
-rw-r--r--arch/arm/mach-ixp23xx/include/mach/io.h1
-rw-r--r--arch/arm/mach-ixp4xx/avila-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/common.c34
-rw-r--r--arch/arm/mach-ixp4xx/coyote-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/dsmg600-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/fsg-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/gateway7001-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/goramo_mlr.c1
-rw-r--r--arch/arm/mach-ixp4xx/gtwx5715-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/io.h24
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/platform.h1
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c4
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/omixp-setup.c3
-rw-r--r--arch/arm/mach-ixp4xx/vulcan-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/wg302v2-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/cpuidle.c72
-rw-r--r--arch/arm/mach-kirkwood/include/mach/io.h2
-rw-r--r--arch/arm/mach-ks8695/include/mach/io.h19
-rw-r--r--arch/arm/mach-lpc32xx/clock.c2
-rw-r--r--arch/arm/mach-lpc32xx/include/mach/io.h27
-rw-r--r--arch/arm/mach-mmp/aspenite.c5
-rw-r--r--arch/arm/mach-mmp/avengers_lite.c1
-rw-r--r--arch/arm/mach-mmp/brownstone.c4
-rw-r--r--arch/arm/mach-mmp/flint.c3
-rw-r--r--arch/arm/mach-mmp/gplugd.c2
-rw-r--r--arch/arm/mach-mmp/include/mach/addr-map.h6
-rw-r--r--arch/arm/mach-mmp/include/mach/io.h21
-rw-r--r--arch/arm/mach-mmp/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-mmp/irq-mmp2.c1
-rw-r--r--arch/arm/mach-mmp/jasper.c5
-rw-r--r--arch/arm/mach-mmp/tavorevb.c1
-rw-r--r--arch/arm/mach-mmp/teton_bga.c3
-rw-r--r--arch/arm/mach-mmp/ttc_dkb.c4
-rw-r--r--arch/arm/mach-msm/board-halibut.c6
-rw-r--r--arch/arm/mach-msm/board-trout.c6
-rw-r--r--arch/arm/mach-msm/include/mach/io.h36
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-7x00.h12
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-7x30.h4
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8960.h4
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8x50.h4
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8x60.h4
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap.h6
-rw-r--r--arch/arm/mach-msm/io.c8
-rw-r--r--arch/arm/mach-msm/timer.c12
-rw-r--r--arch/arm/mach-mv78xx0/include/mach/io.h2
-rw-r--r--arch/arm/mach-mxs/include/mach/hardware.h6
-rw-r--r--arch/arm/mach-mxs/include/mach/io.h22
-rw-r--r--arch/arm/mach-netx/generic.c2
-rw-r--r--arch/arm/mach-netx/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-netx/include/mach/io.h28
-rw-r--r--arch/arm/mach-netx/include/mach/netx-regs.h16
-rw-r--r--arch/arm/mach-nomadik/include/mach/io.h22
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq-handler.S1
-rw-r--r--arch/arm/mach-omap1/board-h2.c8
-rw-r--r--arch/arm/mach-omap1/board-h3.c9
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c6
-rw-r--r--arch/arm/mach-omap1/board-innovator.c4
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c2
-rw-r--r--arch/arm/mach-omap1/board-osk.c12
-rw-r--r--arch/arm/mach-omap1/board-palmte.c2
-rw-r--r--arch/arm/mach-omap1/board-palmtt.c2
-rw-r--r--arch/arm/mach-omap1/board-palmz71.c2
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c16
-rw-r--r--arch/arm/mach-omap1/flash.c20
-rw-r--r--arch/arm/mach-omap1/include/mach/entry-macro.S1
-rw-r--r--arch/arm/mach-omap1/include/mach/io.h46
-rw-r--r--arch/arm/mach-omap1/iomap.h6
-rw-r--r--arch/arm/mach-omap1/pm.c1
-rw-r--r--arch/arm/mach-omap1/sleep.S2
-rw-r--r--arch/arm/mach-omap1/sram.S1
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c2
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c2
-rw-r--r--arch/arm/mach-omap2/board-apollon.c4
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c2
-rw-r--r--arch/arm/mach-omap2/board-h4.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c2
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c2
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c3
-rw-r--r--arch/arm/mach-omap2/board-zoom-debugboard.c3
-rw-r--r--arch/arm/mach-omap2/board-zoom-peripherals.c6
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c1
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c1
-rw-r--r--arch/arm/mach-omap2/common-board-devices.c2
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c42
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c21
-rw-r--r--arch/arm/mach-omap2/display.c8
-rw-r--r--arch/arm/mach-omap2/include/mach/io.h49
-rw-r--r--arch/arm/mach-omap2/iomap.h6
-rw-r--r--arch/arm/mach-omap2/pm.c2
-rw-r--r--arch/arm/mach-orion5x/common.h9
-rw-r--r--arch/arm/mach-orion5x/include/mach/io.h33
-rw-r--r--arch/arm/mach-orion5x/pci.c1
-rw-r--r--arch/arm/mach-orion5x/tsx09-common.c1
-rw-r--r--arch/arm/mach-picoxcell/include/mach/io.h22
-rw-r--r--arch/arm/mach-picoxcell/include/mach/irqs.h20
-rw-r--r--arch/arm/mach-pnx4008/include/mach/io.h21
-rw-r--r--arch/arm/mach-prima2/include/mach/io.h16
-rw-r--r--arch/arm/mach-prima2/timer.c21
-rw-r--r--arch/arm/mach-pxa/Kconfig1
-rw-r--r--arch/arm/mach-pxa/capc7117.c1
-rw-r--r--arch/arm/mach-pxa/clock-pxa2xx.c1
-rw-r--r--arch/arm/mach-pxa/cm-x300.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa270.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa300.c1
-rw-r--r--arch/arm/mach-pxa/colibri-pxa320.c1
-rw-r--r--arch/arm/mach-pxa/corgi.c3
-rw-r--r--arch/arm/mach-pxa/corgi_pm.c1
-rw-r--r--arch/arm/mach-pxa/cpufreq-pxa3xx.c1
-rw-r--r--arch/arm/mach-pxa/csb726.c1
-rw-r--r--arch/arm/mach-pxa/devices.c1
-rw-r--r--arch/arm/mach-pxa/em-x270.c12
-rw-r--r--arch/arm/mach-pxa/gumstix.c1
-rw-r--r--arch/arm/mach-pxa/h5000.c1
-rw-r--r--arch/arm/mach-pxa/himalaya.c1
-rw-r--r--arch/arm/mach-pxa/hx4700.c2
-rw-r--r--arch/arm/mach-pxa/icontrol.c1
-rw-r--r--arch/arm/mach-pxa/idp.c1
-rw-r--r--arch/arm/mach-pxa/include/mach/hardware.h6
-rw-r--r--arch/arm/mach-pxa/include/mach/io.h20
-rw-r--r--arch/arm/mach-pxa/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/mainstone.h2
-rw-r--r--arch/arm/mach-pxa/magician.c2
-rw-r--r--arch/arm/mach-pxa/mfp-pxa2xx.c1
-rw-r--r--arch/arm/mach-pxa/mioa701.c1
-rw-r--r--arch/arm/mach-pxa/mp900.c1
-rw-r--r--arch/arm/mach-pxa/palmld.c1
-rw-r--r--arch/arm/mach-pxa/palmt5.c1
-rw-r--r--arch/arm/mach-pxa/palmtc.c1
-rw-r--r--arch/arm/mach-pxa/palmte2.c1
-rw-r--r--arch/arm/mach-pxa/palmtreo.c2
-rw-r--r--arch/arm/mach-pxa/palmtx.c1
-rw-r--r--arch/arm/mach-pxa/palmz72.c1
-rw-r--r--arch/arm/mach-pxa/pxa2xx.c1
-rw-r--r--arch/arm/mach-pxa/pxa300.c1
-rw-r--r--arch/arm/mach-pxa/pxa320.c1
-rw-r--r--arch/arm/mach-pxa/pxa3xx.c1
-rw-r--r--arch/arm/mach-pxa/raumfeld.c5
-rw-r--r--arch/arm/mach-pxa/saar.c1
-rw-r--r--arch/arm/mach-pxa/sharpsl_pm.c1
-rw-r--r--arch/arm/mach-pxa/spitz.c3
-rw-r--r--arch/arm/mach-pxa/stargate2.c3
-rw-r--r--arch/arm/mach-pxa/tavorevb.c1
-rw-r--r--arch/arm/mach-pxa/time.c1
-rw-r--r--arch/arm/mach-pxa/trizeps4.c2
-rw-r--r--arch/arm/mach-pxa/viper.c1
-rw-r--r--arch/arm/mach-pxa/vpac270.c1
-rw-r--r--arch/arm/mach-pxa/xcep.c1
-rw-r--r--arch/arm/mach-pxa/z2.c1
-rw-r--r--arch/arm/mach-realview/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-realview/include/mach/io.h28
-rw-r--r--arch/arm/mach-rpc/include/mach/hardware.h6
-rw-r--r--arch/arm/mach-rpc/include/mach/io.h5
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/io.h5
-rw-r--r--arch/arm/mach-s3c24xx/simtec-nor.c3
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/io.h18
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/io.h25
-rw-r--r--arch/arm/mach-s5pc100/include/mach/io.h18
-rw-r--r--arch/arm/mach-s5pv210/include/mach/io.h26
-rw-r--r--arch/arm/mach-sa1100/include/mach/io.h20
-rw-r--r--arch/arm/mach-shark/core.c1
-rw-r--r--arch/arm/mach-shark/include/mach/io.h2
-rw-r--r--arch/arm/mach-shmobile/Kconfig4
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c3
-rw-r--r--arch/arm/mach-shmobile/board-bonito.c3
-rw-r--r--arch/arm/mach-shmobile/board-g3evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-g4evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-kota2.c3
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c72
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c1
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c4
-rw-r--r--arch/arm/mach-shmobile/cpuidle.c31
-rw-r--r--arch/arm/mach-shmobile/include/mach/io.h9
-rw-r--r--arch/arm/mach-shmobile/include/mach/irqs.h6
-rw-r--r--arch/arm/mach-shmobile/intc-r8a7740.c1
-rw-r--r--arch/arm/mach-shmobile/intc-r8a7779.c4
-rw-r--r--arch/arm/mach-shmobile/intc-sh7367.c1
-rw-r--r--arch/arm/mach-shmobile/intc-sh7372.c1
-rw-r--r--arch/arm/mach-shmobile/intc-sh7377.c1
-rw-r--r--arch/arm/mach-shmobile/intc-sh73a0.c5
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7740.c1
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7779.c1
-rw-r--r--arch/arm/mach-shmobile/setup-sh7367.c1
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c1
-rw-r--r--arch/arm/mach-shmobile/setup-sh7377.c1
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c1
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c4
-rw-r--r--arch/arm/mach-shmobile/smp-sh73a0.c20
-rw-r--r--arch/arm/mach-spear3xx/clock.c1
-rw-r--r--arch/arm/mach-spear3xx/include/mach/io.h19
-rw-r--r--arch/arm/mach-spear6xx/clock.c1
-rw-r--r--arch/arm/mach-spear6xx/include/mach/io.h20
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra20.c6
-rw-r--r--arch/arm/mach-tegra/devices.c7
-rw-r--r--arch/arm/mach-tegra/devices.h5
-rw-r--r--arch/arm/mach-tegra/include/mach/debug-macro.S1
-rw-r--r--arch/arm/mach-tegra/include/mach/io.h49
-rw-r--r--arch/arm/mach-tegra/include/mach/iomap.h42
-rw-r--r--arch/arm/mach-tegra/io.c1
-rw-r--r--arch/arm/mach-tegra/sleep.S4
-rw-r--r--arch/arm/mach-u300/core.c2
-rw-r--r--arch/arm/mach-u300/include/mach/io.h20
-rw-r--r--arch/arm/mach-u300/include/mach/u300-regs.h11
-rw-r--r--arch/arm/mach-ux500/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-ux500/include/mach/io.h22
-rw-r--r--arch/arm/mach-versatile/include/mach/io.h28
-rw-r--r--arch/arm/mach-vexpress/include/mach/io.h26
-rw-r--r--arch/arm/mach-vt8500/include/mach/io.h26
-rw-r--r--arch/arm/mach-w90x900/dev.c1
-rw-r--r--arch/arm/mach-w90x900/include/mach/io.h30
-rw-r--r--arch/arm/mm/cache-l2x0.c22
-rw-r--r--arch/arm/mm/copypage-v4mc.c9
-rw-r--r--arch/arm/mm/copypage-v6.c20
-rw-r--r--arch/arm/mm/copypage-xscale.c9
-rw-r--r--arch/arm/mm/dma-mapping.c20
-rw-r--r--arch/arm/mm/fault.c3
-rw-r--r--arch/arm/mm/flush.c14
-rw-r--r--arch/arm/mm/highmem.c21
-rw-r--r--arch/arm/mm/init.c4
-rw-r--r--arch/arm/mm/ioremap.c17
-rw-r--r--arch/arm/mm/mm.h26
-rw-r--r--arch/arm/mm/mmu.c7
-rw-r--r--arch/arm/mm/nommu.c8
-rw-r--r--arch/arm/mm/vmregion.c76
-rw-r--r--arch/arm/mm/vmregion.h5
-rw-r--r--arch/arm/net/Makefile3
-rw-r--r--arch/arm/net/bpf_jit_32.c915
-rw-r--r--arch/arm/net/bpf_jit_32.h190
-rw-r--r--arch/arm/plat-mxc/include/mach/hardware.h7
-rw-r--r--arch/arm/plat-mxc/include/mach/io.h39
-rw-r--r--arch/arm/plat-nomadik/Kconfig1
-rw-r--r--arch/arm/plat-nomadik/include/plat/ste_dma40.h3
-rw-r--r--arch/arm/plat-omap/include/plat/gpio.h4
-rw-r--r--arch/arm/plat-omap/include/plat/hardware.h6
-rw-r--r--arch/arm/plat-omap/include/plat/sdrc.h1
-rw-r--r--arch/arm/plat-omap/include/plat/usb.h1
-rw-r--r--arch/arm/plat-s3c24xx/cpu.c1
-rw-r--r--arch/arm/plat-samsung/dma-ops.c4
-rw-r--r--arch/arm/plat-spear/include/plat/hardware.h6
-rw-r--r--arch/arm/plat-spear/include/plat/io.h22
-rw-r--r--arch/arm/plat-spear/include/plat/keyboard.h7
-rw-r--r--arch/arm/plat-versatile/Kconfig3
-rw-r--r--arch/avr32/include/asm/posix_types.h107
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c13
-rw-r--r--arch/avr32/mach-at32ap/include/mach/atmel-mci.h7
-rw-r--r--arch/c6x/Kconfig2
-rw-r--r--arch/cris/include/asm/posix_types.h50
-rw-r--r--arch/frv/include/asm/posix_types.h53
-rw-r--r--arch/h8300/include/asm/posix_types.h49
-rw-r--r--arch/ia64/include/asm/cmpxchg.h1
-rw-r--r--arch/ia64/include/asm/posix_types.h121
-rw-r--r--arch/ia64/kernel/asm-offsets.c4
-rw-r--r--arch/ia64/kernel/fsys.S2
-rw-r--r--arch/ia64/kernel/fsyscall_gtod_data.h2
-rw-r--r--arch/ia64/kernel/process.c1
-rw-r--r--arch/ia64/kernel/time.c10
-rw-r--r--arch/m32r/include/asm/posix_types.h108
-rw-r--r--arch/m68k/include/asm/posix_types.h53
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/cavium-octeon/flash_setup.c2
-rw-r--r--arch/mips/configs/db1300_defconfig2
-rw-r--r--arch/mips/include/asm/posix_types.h121
-rw-r--r--arch/mips/kernel/kspd.c2
-rw-r--r--arch/mn10300/include/asm/posix_types.h111
-rw-r--r--arch/parisc/include/asm/posix_types.h119
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/Kconfig.debug10
-rw-r--r--arch/powerpc/configs/85xx/p1023rds_defconfig2
-rw-r--r--arch/powerpc/configs/chroma_defconfig2
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig2
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig2
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig2
-rw-r--r--arch/powerpc/configs/ppc64_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig2
-rw-r--r--arch/powerpc/include/asm/posix_types.h118
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c2
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/cpu_mf.h2
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/posix_types.h70
-rw-r--r--arch/s390/kernel/lgr.c1
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c2
-rw-r--r--arch/s390/kernel/perf_event.c1
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/kernel/smp.c2
-rw-r--r--arch/sh/Kconfig6
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c104
-rw-r--r--arch/sh/drivers/dma/dma-g2.c4
-rw-r--r--arch/sh/drivers/dma/dmabrg.c4
-rw-r--r--arch/sh/drivers/pci/pci-sh7780.c15
-rw-r--r--arch/sh/include/asm/io.h25
-rw-r--r--arch/sh/include/asm/irq.h11
-rw-r--r--arch/sh/include/asm/posix_types_32.h5
-rw-r--r--arch/sh/include/asm/posix_types_64.h4
-rw-r--r--arch/sh/include/asm/unistd.h37
-rw-r--r--arch/sh/include/asm/unistd_32.h102
-rw-r--r--arch/sh/include/asm/unistd_64.h106
-rw-r--r--arch/sh/include/cpu-sh4/cpu/dma-register.h32
-rw-r--r--arch/sh/include/mach-common/mach/mangle-port.h49
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7757.c20
-rw-r--r--arch/sh/kernel/cpu/shmobile/cpuidle.c10
-rw-r--r--arch/sh/kernel/cpufreq.c121
-rw-r--r--arch/sh/kernel/signal_32.c35
-rw-r--r--arch/sh/kernel/signal_64.c40
-rw-r--r--arch/sh/kernel/syscalls_32.S8
-rw-r--r--arch/sh/kernel/syscalls_64.S8
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/posix_types.h133
-rw-r--r--arch/sparc/include/asm/ptrace.h1
-rw-r--r--arch/sparc/kernel/jump_label.c2
-rw-r--r--arch/sparc/kernel/kgdb_64.c1
-rw-r--r--arch/tile/include/asm/compat.h11
-rw-r--r--arch/tile/kernel/compat.c43
-rw-r--r--arch/x86/Kconfig23
-rw-r--r--arch/x86/Makefile16
-rw-r--r--arch/x86/configs/i386_defconfig64
-rw-r--r--arch/x86/configs/x86_64_defconfig67
-rw-r--r--arch/x86/ia32/ia32_signal.c24
-rw-r--r--arch/x86/ia32/sys_ia32.c40
-rw-r--r--arch/x86/include/asm/Kbuild2
-rw-r--r--arch/x86/include/asm/apic.h2
-rw-r--r--arch/x86/include/asm/compat.h40
-rw-r--r--arch/x86/include/asm/elf.h31
-rw-r--r--arch/x86/include/asm/ia32.h18
-rw-r--r--arch/x86/include/asm/idle.h1
-rw-r--r--arch/x86/include/asm/io_apic.h9
-rw-r--r--arch/x86/include/asm/mtrr.h28
-rw-r--r--arch/x86/include/asm/posix_types.h4
-rw-r--r--arch/x86/include/asm/posix_types_32.h75
-rw-r--r--arch/x86/include/asm/posix_types_64.h106
-rw-r--r--arch/x86/include/asm/posix_types_x32.h19
-rw-r--r--arch/x86/include/asm/processor.h12
-rw-r--r--arch/x86/include/asm/ptrace.h1
-rw-r--r--arch/x86/include/asm/sigcontext.h57
-rw-r--r--arch/x86/include/asm/sigframe.h13
-rw-r--r--arch/x86/include/asm/sighandling.h24
-rw-r--r--arch/x86/include/asm/sys_ia32.h7
-rw-r--r--arch/x86/include/asm/syscall.h5
-rw-r--r--arch/x86/include/asm/thread_info.h18
-rw-r--r--arch/x86/include/asm/traps.h25
-rw-r--r--arch/x86/include/asm/unistd.h15
-rw-r--r--arch/x86/include/asm/vgtod.h17
-rw-r--r--arch/x86/include/asm/x2apic.h5
-rw-r--r--arch/x86/kernel/acpi/boot.c5
-rw-r--r--arch/x86/kernel/apic/apic.c13
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c3
-rw-r--r--arch/x86/kernel/apic/io_apic.c159
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c7
-rw-r--r--arch/x86/kernel/asm-offsets_64.c6
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c10
-rw-r--r--arch/x86/kernel/cpu/perf_event.c4
-rw-r--r--arch/x86/kernel/dumpstack.c9
-rw-r--r--arch/x86/kernel/entry_64.S44
-rw-r--r--arch/x86/kernel/irqinit.c2
-rw-r--r--arch/x86/kernel/process.c114
-rw-r--r--arch/x86/kernel/process_32.c58
-rw-r--r--arch/x86/kernel/process_64.c134
-rw-r--r--arch/x86/kernel/ptrace.c102
-rw-r--r--arch/x86/kernel/signal.c140
-rw-r--r--arch/x86/kernel/smpboot.c9
-rw-r--r--arch/x86/kernel/sys_x86_64.c6
-rw-r--r--arch/x86/kernel/syscall_64.c8
-rw-r--r--arch/x86/kernel/tboot.c9
-rw-r--r--arch/x86/kernel/tls.c4
-rw-r--r--arch/x86/kernel/traps.c133
-rw-r--r--arch/x86/kernel/tsc.c10
-rw-r--r--arch/x86/kernel/vm86_32.c2
-rw-r--r--arch/x86/kernel/vsyscall_64.c27
-rw-r--r--arch/x86/math-emu/fpu_entry.c5
-rw-r--r--arch/x86/mm/fault.c10
-rw-r--r--arch/x86/mm/srat.c2
-rw-r--r--arch/x86/oprofile/backtrace.c2
-rw-r--r--arch/x86/platform/olpc/olpc.c97
-rw-r--r--arch/x86/syscalls/Makefile22
-rw-r--r--arch/x86/syscalls/syscall_32.tbl2
-rw-r--r--arch/x86/syscalls/syscall_64.tbl579
-rw-r--r--arch/x86/um/sys_call_table_64.c3
-rw-r--r--arch/x86/um/user-offsets.c2
-rw-r--r--arch/x86/vdso/.gitignore2
-rw-r--r--arch/x86/vdso/Makefile46
-rw-r--r--arch/x86/vdso/vclock_gettime.c135
-rw-r--r--arch/x86/vdso/vdso32-setup.c5
-rw-r--r--arch/x86/vdso/vdsox32.S22
-rw-r--r--arch/x86/vdso/vdsox32.lds.S28
-rw-r--r--arch/x86/vdso/vma.c78
-rw-r--r--arch/xtensa/configs/iss_defconfig2
-rw-r--r--arch/xtensa/include/asm/posix_types.h97
-rw-r--r--drivers/acpi/Kconfig9
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpica/Makefile4
-rw-r--r--drivers/acpi/acpica/accommon.h1
-rw-r--r--drivers/acpi/acpica/acdebug.h8
-rw-r--r--drivers/acpi/acpica/acevents.h21
-rw-r--r--drivers/acpi/acpica/acglobal.h11
-rw-r--r--drivers/acpi/acpica/achware.h32
-rw-r--r--drivers/acpi/acpica/aclocal.h1
-rw-r--r--drivers/acpi/acpica/acmacros.h6
-rw-r--r--drivers/acpi/acpica/acnamesp.h5
-rw-r--r--drivers/acpi/acpica/actables.h5
-rw-r--r--drivers/acpi/acpica/evevent.c4
-rw-r--r--drivers/acpi/acpica/evglock.c4
-rw-r--r--drivers/acpi/acpica/evgpe.c4
-rw-r--r--drivers/acpi/acpica/evgpeblk.c4
-rw-r--r--drivers/acpi/acpica/evgpeinit.c4
-rw-r--r--drivers/acpi/acpica/evgpeutil.c3
-rw-r--r--drivers/acpi/acpica/evmisc.c26
-rw-r--r--drivers/acpi/acpica/evsci.c4
-rw-r--r--drivers/acpi/acpica/evxface.c436
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/hwacpi.c3
-rw-r--r--drivers/acpi/acpica/hwesleep.c247
-rw-r--r--drivers/acpi/acpica/hwgpe.c4
-rw-r--r--drivers/acpi/acpica/hwregs.c16
-rw-r--r--drivers/acpi/acpica/hwsleep.c401
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c50
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c431
-rw-r--r--drivers/acpi/acpica/nsdump.c15
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c4
-rw-r--r--drivers/acpi/acpica/nsrepair.c159
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c8
-rw-r--r--drivers/acpi/acpica/tbinstal.c117
-rw-r--r--drivers/acpi/acpica/tbutils.c95
-rw-r--r--drivers/acpi/acpica/utdecode.c34
-rw-r--r--drivers/acpi/acpica/utglobal.c9
-rw-r--r--drivers/acpi/acpica/utinit.c37
-rw-r--r--drivers/acpi/acpica/utxface.c6
-rw-r--r--drivers/acpi/apei/apei-base.c61
-rw-r--r--drivers/acpi/apei/cper.c2
-rw-r--r--drivers/acpi/apei/einj.c17
-rw-r--r--drivers/acpi/apei/erst.c2
-rw-r--r--drivers/acpi/bgrt.c175
-rw-r--r--drivers/acpi/bus.c1
-rw-r--r--drivers/acpi/ec.c8
-rw-r--r--drivers/acpi/nvs.c4
-rw-r--r--drivers/acpi/osl.c124
-rw-r--r--drivers/acpi/power.c166
-rw-r--r--drivers/acpi/processor_driver.c62
-rw-r--r--drivers/acpi/processor_idle.c34
-rw-r--r--drivers/acpi/processor_thermal.c45
-rw-r--r--drivers/acpi/processor_throttling.c5
-rw-r--r--drivers/acpi/reboot.c3
-rw-r--r--drivers/acpi/scan.c12
-rw-r--r--drivers/acpi/sleep.c76
-rw-r--r--drivers/acpi/thermal.c8
-rw-r--r--drivers/acpi/video.c50
-rw-r--r--drivers/char/lp.c5
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/cpuidle/cpuidle.c97
-rw-r--r--drivers/cpuidle/driver.c2
-rw-r--r--drivers/cpuidle/governors/menu.c7
-rw-r--r--drivers/cpuidle/sysfs.c40
-rw-r--r--drivers/dma/Kconfig3
-rw-r--r--drivers/dma/amba-pl08x.c46
-rw-r--r--drivers/dma/at_hdmac.c111
-rw-r--r--drivers/dma/at_hdmac_regs.h34
-rw-r--r--drivers/dma/coh901318.c41
-rw-r--r--drivers/dma/dmaengine.c8
-rw-r--r--drivers/dma/dmaengine.h89
-rw-r--r--drivers/dma/dw_dmac.c228
-rw-r--r--drivers/dma/dw_dmac_regs.h16
-rw-r--r--drivers/dma/ep93xx_dma.c31
-rw-r--r--drivers/dma/fsldma.c28
-rw-r--r--drivers/dma/fsldma.h1
-rw-r--r--drivers/dma/imx-dma.c950
-rw-r--r--drivers/dma/imx-sdma.c187
-rw-r--r--drivers/dma/intel_mid_dma.c46
-rw-r--r--drivers/dma/intel_mid_dma_regs.h2
-rw-r--r--drivers/dma/ioat/dma.c21
-rw-r--r--drivers/dma/ioat/dma.h23
-rw-r--r--drivers/dma/ioat/dma_v2.c13
-rw-r--r--drivers/dma/ioat/dma_v3.c12
-rw-r--r--drivers/dma/iop-adma.c52
-rw-r--r--drivers/dma/ipu/ipu_idmac.c25
-rw-r--r--drivers/dma/mpc512x_dma.c25
-rw-r--r--drivers/dma/mv_xor.c34
-rw-r--r--drivers/dma/mv_xor.h3
-rw-r--r--drivers/dma/mxs-dma.c60
-rw-r--r--drivers/dma/pch_dma.c37
-rw-r--r--drivers/dma/pl330.c2149
-rw-r--r--drivers/dma/ppc4xx/adma.c49
-rw-r--r--drivers/dma/ppc4xx/adma.h2
-rw-r--r--drivers/dma/shdma.c33
-rw-r--r--drivers/dma/shdma.h1
-rw-r--r--drivers/dma/sirf-dma.c27
-rw-r--r--drivers/dma/ste_dma40.c41
-rw-r--r--drivers/dma/timb_dma.c37
-rw-r--r--drivers/dma/txx9dmac.c43
-rw-r--r--drivers/dma/txx9dmac.h1
-rw-r--r--drivers/gpio/gpio-pxa.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c7
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c10
-rw-r--r--drivers/input/input-compat.c4
-rw-r--r--drivers/input/input-compat.h2
-rw-r--r--drivers/input/joystick/amijoy.c3
-rw-r--r--drivers/input/keyboard/gpio_keys.c259
-rw-r--r--drivers/input/keyboard/tegra-kbc.c1
-rw-r--r--drivers/input/mouse/sentelic.c294
-rw-r--r--drivers/input/mouse/sentelic.h35
-rw-r--r--drivers/input/serio/ams_delta_serio.c2
-rw-r--r--drivers/input/tablet/Kconfig1
-rw-r--r--drivers/input/tablet/wacom.h9
-rw-r--r--drivers/input/tablet/wacom_sys.c231
-rw-r--r--drivers/input/tablet/wacom_wac.c49
-rw-r--r--drivers/input/tablet/wacom_wac.h6
-rw-r--r--drivers/media/video/davinci/vpbe_osd.c1
-rw-r--r--drivers/media/video/davinci/vpbe_venc.c1
-rw-r--r--drivers/media/video/mx3_camera.c2
-rw-r--r--drivers/media/video/timblogiw.c2
-rw-r--r--drivers/mmc/host/atmel-mci.c21
-rw-r--r--drivers/mmc/host/mmci.c4
-rw-r--r--drivers/mmc/host/mxcmmc.c5
-rw-r--r--drivers/mmc/host/mxs-mmc.c14
-rw-r--r--drivers/mmc/host/sh_mmcif.c4
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c4
-rw-r--r--drivers/mtd/Kconfig3
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c83
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c283
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c33
-rw-r--r--drivers/mtd/chips/cfi_util.c6
-rw-r--r--drivers/mtd/chips/fwh_lock.h4
-rw-r--r--drivers/mtd/chips/map_absent.c10
-rw-r--r--drivers/mtd/chips/map_ram.c14
-rw-r--r--drivers/mtd/chips/map_rom.c13
-rw-r--r--drivers/mtd/devices/Kconfig7
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/block2mtd.c28
-rw-r--r--drivers/mtd/devices/doc2000.c25
-rw-r--r--drivers/mtd/devices/doc2001.c22
-rw-r--r--drivers/mtd/devices/doc2001plus.c22
-rw-r--r--drivers/mtd/devices/docg3.c201
-rw-r--r--drivers/mtd/devices/docg3.h20
-rw-r--r--drivers/mtd/devices/lart.c17
-rw-r--r--drivers/mtd/devices/m25p80.c56
-rw-r--r--drivers/mtd/devices/ms02-nv.c12
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c50
-rw-r--r--drivers/mtd/devices/mtdram.c35
-rw-r--r--drivers/mtd/devices/phram.c76
-rw-r--r--drivers/mtd/devices/pmc551.c99
-rw-r--r--drivers/mtd/devices/slram.c41
-rw-r--r--drivers/mtd/devices/spear_smi.c1147
-rw-r--r--drivers/mtd/devices/sst25l.c46
-rw-r--r--drivers/mtd/inftlcore.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c37
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c4
-rw-r--r--drivers/mtd/maps/dc21285.c2
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c4
-rw-r--r--drivers/mtd/maps/h720x-flash.c4
-rw-r--r--drivers/mtd/maps/impa7.c2
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c2
-rw-r--r--drivers/mtd/maps/ixp2000.c2
-rw-r--r--drivers/mtd/maps/ixp4xx.c5
-rw-r--r--drivers/mtd/maps/l440gx.c14
-rw-r--r--drivers/mtd/maps/lantiq-flash.c6
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c5
-rw-r--r--drivers/mtd/maps/pcmciamtd.c13
-rw-r--r--drivers/mtd/maps/physmap.c24
-rw-r--r--drivers/mtd/maps/plat-ram.c5
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c3
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c4
-rw-r--r--drivers/mtd/maps/sa1100-flash.c18
-rw-r--r--drivers/mtd/maps/solutionengine.c4
-rw-r--r--drivers/mtd/maps/uclinux.c2
-rw-r--r--drivers/mtd/maps/vmu-flash.c14
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c1
-rw-r--r--drivers/mtd/mtdblock.c8
-rw-r--r--drivers/mtd/mtdchar.c4
-rw-r--r--drivers/mtd/mtdconcat.c106
-rw-r--r--drivers/mtd/mtdcore.c271
-rw-r--r--drivers/mtd/mtdoops.c9
-rw-r--r--drivers/mtd/mtdpart.c200
-rw-r--r--drivers/mtd/nand/Kconfig21
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/alauda.c9
-rw-r--r--drivers/mtd/nand/atmel_nand.c1
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c10
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c2
-rw-r--r--drivers/mtd/nand/cafe_nand.c3
-rw-r--r--drivers/mtd/nand/cmx270_nand.c2
-rw-r--r--drivers/mtd/nand/cs553x_nand.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c5
-rw-r--r--drivers/mtd/nand/denali.c3
-rw-r--r--drivers/mtd/nand/diskonchip.c1
-rw-r--r--drivers/mtd/nand/docg4.c1377
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c6
-rw-r--r--drivers/mtd/nand/fsmc_nand.c924
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c43
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c14
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h2
-rw-r--r--drivers/mtd/nand/h1910.c4
-rw-r--r--drivers/mtd/nand/jz4740_nand.c11
-rw-r--r--drivers/mtd/nand/mxc_nand.c11
-rw-r--r--drivers/mtd/nand/nand_base.c194
-rw-r--r--drivers/mtd/nand/ndfc.c1
-rw-r--r--drivers/mtd/nand/omap2.c5
-rw-r--r--drivers/mtd/nand/orion_nand.c4
-rw-r--r--drivers/mtd/nand/plat_nand.c5
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c18
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/mtd/nand/r852.c1
-rw-r--r--drivers/mtd/nand/rtc_from4.c1
-rw-r--r--drivers/mtd/nand/s3c2410.c5
-rw-r--r--drivers/mtd/nand/sh_flctl.c106
-rw-r--r--drivers/mtd/nand/sharpsl.c5
-rw-r--r--drivers/mtd/nand/tmio_nand.c7
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c3
-rw-r--r--drivers/mtd/nftlcore.c7
-rw-r--r--drivers/mtd/onenand/generic.c6
-rw-r--r--drivers/mtd/onenand/omap2.c6
-rw-r--r--drivers/mtd/onenand/onenand_base.c68
-rw-r--r--drivers/mtd/onenand/samsung.c6
-rw-r--r--drivers/mtd/redboot.c6
-rw-r--r--drivers/mtd/sm_ftl.c2
-rw-r--r--drivers/mtd/ubi/gluebi.c29
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c4
-rw-r--r--drivers/net/ethernet/sfc/mtd.c10
-rw-r--r--drivers/net/wan/Kconfig4
-rw-r--r--drivers/pci/pci-acpi.c40
-rw-r--r--drivers/pcmcia/at91_cf.c52
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c2
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c13
-rw-r--r--drivers/pcmcia/db1xxx_ss.c17
-rw-r--r--drivers/pcmcia/electra_cf.c12
-rw-r--r--drivers/pcmcia/i82092.c11
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c13
-rw-r--r--drivers/pcmcia/pd6729.c9
-rw-r--r--drivers/pcmcia/pxa2xx_viper.c13
-rw-r--r--drivers/pcmcia/vrc4173_cardu.c7
-rw-r--r--drivers/pcmcia/xxs1500_ss.c13
-rw-r--r--drivers/pcmcia/yenta_socket.c2
-rw-r--r--drivers/platform/x86/intel_ips.c13
-rw-r--r--drivers/pnp/pnpacpi/core.c7
-rw-r--r--drivers/power/Kconfig21
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/ab8500_btemp.c1124
-rw-r--r--drivers/power/ab8500_charger.c2789
-rw-r--r--drivers/power/ab8500_fg.c2637
-rw-r--r--drivers/power/abx500_chargalg.c1921
-rw-r--r--drivers/power/charger-manager.c67
-rw-r--r--drivers/power/da9052-battery.c15
-rw-r--r--drivers/power/ds2782_battery.c13
-rw-r--r--drivers/power/isp1704_charger.c1
-rw-r--r--drivers/power/lp8727_charger.c131
-rw-r--r--drivers/power/max17040_battery.c13
-rw-r--r--drivers/power/max17042_battery.c508
-rw-r--r--drivers/power/sbs-battery.c13
-rw-r--r--drivers/power/smb347-charger.c1294
-rw-r--r--drivers/power/z2_battery.c14
-rw-r--r--drivers/rtc/interface.c5
-rw-r--r--drivers/rtc/rtc-mpc5121.c2
-rw-r--r--drivers/rtc/rtc-sa1100.c1
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c8
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c8
-rw-r--r--drivers/sh/intc/balancing.c2
-rw-r--r--drivers/sh/intc/chip.c37
-rw-r--r--drivers/sh/intc/core.c13
-rw-r--r--drivers/sh/intc/handle.c7
-rw-r--r--drivers/sh/intc/internals.h9
-rw-r--r--drivers/sh/intc/virq.c2
-rw-r--r--drivers/spi/spi-dw-mid.c7
-rw-r--r--drivers/spi/spi-ep93xx.c4
-rw-r--r--drivers/spi/spi-pl022.c6
-rw-r--r--drivers/spi/spi-topcliff-pch.c4
-rw-r--r--drivers/staging/android/binder.c13
-rw-r--r--drivers/staging/asus_oled/README2
-rw-r--r--drivers/thermal/Kconfig8
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/spear_thermal.c206
-rw-r--r--drivers/thermal/thermal_sys.c94
-rw-r--r--drivers/tty/isicom.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c9
-rw-r--r--drivers/tty/serial/pch_uart.c4
-rw-r--r--drivers/tty/serial/sh-sci.c19
-rw-r--r--drivers/usb/musb/ux500_dma.c4
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c5
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/storage/Kconfig2
-rw-r--r--drivers/video/mx3fb.c4
-rw-r--r--drivers/video/omap2/vrfb.c1
-rw-r--r--drivers/watchdog/sa1100_wdt.c1
-rw-r--r--fs/autofs4/dev-ioctl.c2
-rw-r--r--fs/binfmt_elf.c24
-rw-r--r--fs/btrfs/async-thread.c15
-rw-r--r--fs/btrfs/async-thread.h4
-rw-r--r--fs/btrfs/backref.c122
-rw-r--r--fs/btrfs/backref.h5
-rw-r--r--fs/btrfs/compression.c38
-rw-r--r--fs/btrfs/compression.h2
-rw-r--r--fs/btrfs/ctree.c384
-rw-r--r--fs/btrfs/ctree.h169
-rw-r--r--fs/btrfs/delayed-inode.c33
-rw-r--r--fs/btrfs/delayed-ref.c33
-rw-r--r--fs/btrfs/dir-item.c10
-rw-r--r--fs/btrfs/disk-io.c649
-rw-r--r--fs/btrfs/disk-io.h10
-rw-r--r--fs/btrfs/export.c2
-rw-r--r--fs/btrfs/extent-tree.c737
-rw-r--r--fs/btrfs/extent_io.c1035
-rw-r--r--fs/btrfs/extent_io.h62
-rw-r--r--fs/btrfs/file-item.c57
-rw-r--r--fs/btrfs/file.c52
-rw-r--r--fs/btrfs/free-space-cache.c15
-rw-r--r--fs/btrfs/inode-item.c6
-rw-r--r--fs/btrfs/inode-map.c25
-rw-r--r--fs/btrfs/inode.c457
-rw-r--r--fs/btrfs/ioctl.c194
-rw-r--r--fs/btrfs/locking.c6
-rw-r--r--fs/btrfs/locking.h4
-rw-r--r--fs/btrfs/ordered-data.c60
-rw-r--r--fs/btrfs/ordered-data.h24
-rw-r--r--fs/btrfs/orphan.c2
-rw-r--r--fs/btrfs/reada.c10
-rw-r--r--fs/btrfs/relocation.c130
-rw-r--r--fs/btrfs/root-tree.c25
-rw-r--r--fs/btrfs/scrub.c1407
-rw-r--r--fs/btrfs/struct-funcs.c53
-rw-r--r--fs/btrfs/super.c192
-rw-r--r--fs/btrfs/transaction.c213
-rw-r--r--fs/btrfs/transaction.h3
-rw-r--r--fs/btrfs/tree-log.c96
-rw-r--r--fs/btrfs/tree-log.h2
-rw-r--r--fs/btrfs/volumes.c240
-rw-r--r--fs/btrfs/volumes.h4
-rw-r--r--fs/cifs/cifs_debug.c68
-rw-r--r--fs/cifs/cifs_debug.h4
-rw-r--r--fs/cifs/cifsfs.c13
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h39
-rw-r--r--fs/cifs/cifsproto.h20
-rw-r--r--fs/cifs/cifssmb.c126
-rw-r--r--fs/cifs/connect.c1454
-rw-r--r--fs/cifs/file.c271
-rw-r--r--fs/cifs/misc.c100
-rw-r--r--fs/cifs/netmisc.c3
-rw-r--r--fs/cifs/transport.c227
-rw-r--r--fs/compat.c26
-rw-r--r--fs/exec.c8
-rw-r--r--fs/ext4/dir.c214
-rw-r--r--fs/ext4/ext4.h6
-rw-r--r--fs/ext4/hash.c4
-rw-r--r--fs/ext4/page-io.c7
-rw-r--r--fs/fcntl.c18
-rw-r--r--fs/file.c52
-rw-r--r--fs/jffs2/acl.c2
-rw-r--r--fs/jffs2/background.c29
-rw-r--r--fs/jffs2/build.c6
-rw-r--r--fs/jffs2/compr.c32
-rw-r--r--fs/jffs2/compr_lzo.c1
-rw-r--r--fs/jffs2/compr_rubin.c2
-rw-r--r--fs/jffs2/compr_zlib.c45
-rw-r--r--fs/jffs2/debug.c22
-rw-r--r--fs/jffs2/debug.h50
-rw-r--r--fs/jffs2/dir.c41
-rw-r--r--fs/jffs2/erase.c72
-rw-r--r--fs/jffs2/file.c33
-rw-r--r--fs/jffs2/fs.c67
-rw-r--r--fs/jffs2/gc.c322
-rw-r--r--fs/jffs2/malloc.c2
-rw-r--r--fs/jffs2/nodelist.c30
-rw-r--r--fs/jffs2/nodemgmt.c214
-rw-r--r--fs/jffs2/os-linux.h4
-rw-r--r--fs/jffs2/read.c70
-rw-r--r--fs/jffs2/readinode.c2
-rw-r--r--fs/jffs2/scan.c229
-rw-r--r--fs/jffs2/security.c4
-rw-r--r--fs/jffs2/summary.c16
-rw-r--r--fs/jffs2/super.c30
-rw-r--r--fs/jffs2/symlink.c7
-rw-r--r--fs/jffs2/wbuf.c148
-rw-r--r--fs/jffs2/write.c113
-rw-r--r--fs/jffs2/xattr.c2
-rw-r--r--fs/lockd/svc.c2
-rw-r--r--fs/nfsd/current_stateid.h28
-rw-r--r--fs/nfsd/export.c2
-rw-r--r--fs/nfsd/netns.h34
-rw-r--r--fs/nfsd/nfs4callback.c19
-rw-r--r--fs/nfsd/nfs4idmap.c53
-rw-r--r--fs/nfsd/nfs4proc.c118
-rw-r--r--fs/nfsd/nfs4recover.c647
-rw-r--r--fs/nfsd/nfs4state.c365
-rw-r--r--fs/nfsd/nfs4xdr.c132
-rw-r--r--fs/nfsd/nfsctl.c22
-rw-r--r--fs/nfsd/nfsd.h7
-rw-r--r--fs/nfsd/nfssvc.c44
-rw-r--r--fs/nfsd/state.h47
-rw-r--r--fs/nfsd/vfs.c33
-rw-r--r--fs/nfsd/vfs.h2
-rw-r--r--fs/nfsd/xdr4.h34
-rw-r--r--fs/open.c4
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/proc/task_mmu.c1
-rw-r--r--fs/romfs/storage.c2
-rw-r--r--fs/select.c2
-rw-r--r--include/acpi/acconfig.h (renamed from drivers/acpi/acpica/acconfig.h)19
-rw-r--r--include/acpi/acexcep.h7
-rw-r--r--include/acpi/acnames.h12
-rw-r--r--include/acpi/acpi_bus.h7
-rw-r--r--include/acpi/acpiosxf.h13
-rw-r--r--include/acpi/acpixf.h229
-rw-r--r--include/acpi/actbl.h7
-rw-r--r--include/acpi/actypes.h22
-rw-r--r--include/asm-generic/posix_types.h109
-rw-r--r--include/asm-generic/unistd.h2
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/acpi.h10
-rw-r--r--include/linux/aio_abi.h2
-rw-r--r--include/linux/amba/pl08x.h10
-rw-r--r--include/linux/amba/pl330.h1
-rw-r--r--include/linux/compat.h32
-rw-r--r--include/linux/cpuidle.h22
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/dma-mapping.h2
-rw-r--r--include/linux/dmaengine.h35
-rw-r--r--include/linux/dw_dmac.h38
-rw-r--r--include/linux/fdtable.h46
-rw-r--r--include/linux/fs.h4
-rw-r--r--include/linux/fsl/mxs-dma.h (renamed from arch/arm/mach-mxs/include/mach/dma.h)0
-rw-r--r--include/linux/gpio_keys.h3
-rw-r--r--include/linux/kernel.h21
-rw-r--r--include/linux/lp8727.h18
-rw-r--r--include/linux/mfd/abx500.h273
-rw-r--r--include/linux/mfd/abx500/ab8500-bm.h474
-rw-r--r--include/linux/mfd/abx500/ux500_chargalg.h38
-rw-r--r--include/linux/mtd/bbm.h5
-rw-r--r--include/linux/mtd/blktrans.h1
-rw-r--r--include/linux/mtd/fsmc.h169
-rw-r--r--include/linux/mtd/mtd.h304
-rw-r--r--include/linux/mtd/nand.h7
-rw-r--r--include/linux/mtd/pmc551.h78
-rw-r--r--include/linux/mtd/sh_flctl.h40
-rw-r--r--include/linux/mtd/spear_smi.h65
-rw-r--r--include/linux/nfs4.h15
-rw-r--r--include/linux/nfsd/cld.h56
-rw-r--r--include/linux/platform_data/spear_thermal.h (renamed from arch/arm/mach-zynq/include/mach/io.h)33
-rw-r--r--include/linux/power/max17042_battery.h93
-rw-r--r--include/linux/power/smb347-charger.h117
-rw-r--r--include/linux/rtc.h3
-rw-r--r--include/linux/sh_intc.h17
-rw-r--r--include/linux/spinlock_api_smp.h2
-rw-r--r--include/linux/sunrpc/svc_rdma.h4
-rw-r--r--include/linux/sysinfo.h24
-rw-r--r--include/linux/tboot.h1
-rw-r--r--include/linux/time.h2
-rw-r--r--include/linux/timex.h2
-rw-r--r--include/trace/events/btrfs.h44
-rw-r--r--ipc/compat.c70
-rw-r--r--kernel/Kconfig.locks4
-rw-r--r--kernel/Kconfig.preempt1
-rw-r--r--kernel/cgroup.c2
-rw-r--r--kernel/compat.c68
-rw-r--r--kernel/cpuset.c21
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/irq/Kconfig15
-rw-r--r--kernel/irq/irqdomain.c8
-rw-r--r--kernel/sched/core.c62
-rw-r--r--kernel/sched/fair.c16
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/spinlock.c2
-rw-r--r--kernel/time.c6
-rw-r--r--kernel/time/alarmtimer.c8
-rw-r--r--kernel/time/clocksource.c2
-rw-r--r--kernel/time/ntp.c134
-rw-r--r--kernel/time/timekeeping.c51
-rw-r--r--lib/Kconfig.debug1
-rw-r--r--net/bluetooth/hci_sock.c3
-rw-r--r--net/compat.c65
-rw-r--r--net/socket.c18
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/rpc_pipe.c5
-rw-r--r--net/sunrpc/svcauth_unix.c2
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_marshal.c66
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c20
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c26
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c10
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h7
-rw-r--r--net/sunrpc/xprtsock.c1
-rw-r--r--scripts/gcc-goto.sh18
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--sound/arm/pxa2xx-ac97-lib.c3
-rw-r--r--sound/arm/pxa2xx-ac97.c1
-rw-r--r--sound/atmel/abdac.c18
-rw-r--r--sound/atmel/ac97c.c41
-rw-r--r--sound/core/seq/seq_dummy.c2
-rw-r--r--sound/drivers/Kconfig3
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c2
-rw-r--r--sound/oss/msnd_pinnacle.c2
-rw-r--r--sound/pci/asihpi/hpi_internal.h2
-rw-r--r--sound/pci/asihpi/hpios.c2
-rw-r--r--sound/pci/hda/patch_realtek.c11
-rw-r--r--sound/soc/codecs/wm8994.c2
-rw-r--r--sound/soc/imx/imx-pcm-dma-mx2.c3
-rw-r--r--sound/soc/mxs/mxs-pcm.c2
-rw-r--r--sound/soc/mxs/mxs-saif.c2
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c1
-rw-r--r--sound/soc/sh/siu_pcm.c4
-rw-r--r--sound/soc/soc-dmaengine-pcm.c2
-rw-r--r--sound/soc/txx9/txx9aclc.c2
-rw-r--r--tools/power/cpupower/Makefile93
-rw-r--r--tools/power/cpupower/bench/Makefile23
-rw-r--r--tools/power/cpupower/debug/i386/Makefile40
-rw-r--r--tools/power/cpupower/debug/x86_64/Makefile26
-rw-r--r--tools/power/cpupower/man/cpupower-frequency-info.14
-rw-r--r--tools/power/cpupower/man/cpupower-frequency-set.14
-rw-r--r--tools/power/cpupower/man/cpupower-idle-info.190
-rw-r--r--tools/power/cpupower/man/cpupower-monitor.12
-rw-r--r--tools/power/cpupower/utils/cpuidle-info.c12
-rw-r--r--tools/power/cpupower/utils/helpers/amd.c4
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h11
-rw-r--r--tools/power/cpupower/utils/helpers/pci.c35
-rw-r--r--tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c25
-rw-r--r--tools/power/x86/turbostat/turbostat.899
-rw-r--r--tools/power/x86/turbostat/turbostat.c245
1102 files changed, 38991 insertions, 17986 deletions
diff --git a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
index 23a43b8207e6..2a7f9a00cb0a 100644
--- a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
+++ b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
@@ -55,7 +55,7 @@ What: /sys/bus/usb/drivers/usbtmc/devices/*/auto_abort
Date: August 2008
Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Description:
- This file determines if the the transaction of the USB TMC
+ This file determines if the transaction of the USB TMC
device is to be automatically aborted if there is any error.
For more details about this, please see the document,
"Universal Serial Bus Test and Measurement Class Specification
diff --git a/Documentation/ABI/testing/debugfs-olpc b/Documentation/ABI/testing/debugfs-olpc
new file mode 100644
index 000000000000..bd76cc6d55f9
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-olpc
@@ -0,0 +1,16 @@
+What: /sys/kernel/debug/olpc-ec/cmd
+Date: Dec 2011
+KernelVersion: 3.4
+Contact: devel@lists.laptop.org
+Description:
+
+A generic interface for executing OLPC Embedded Controller commands and
+reading their responses.
+
+To execute a command, write data with the format: CC:N A A A A
+CC is the (hex) command, N is the count of expected reply bytes, and A A A A
+are optional (hex) arguments.
+
+To read the response (if any), read from the generic node after executing
+a command. Hex reply bytes will be returned, *whether or not* they came from
+the immediately previous command.
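
A minimal user-space sketch of driving the cmd node described above. The
command byte 0xa2 and the single expected reply byte are illustrative
assumptions, not values taken from this patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		const char *node = "/sys/kernel/debug/olpc-ec/cmd";
		char reply[64];
		ssize_t n;
		int fd;

		fd = open(node, O_RDWR);
		if (fd < 0) {
			perror(node);
			return 1;
		}

		/* "CC:N" with no arguments; 0xa2 and the reply count are made up. */
		if (write(fd, "a2:1", 4) < 0)
			perror("write");

		/* Hex reply bytes, possibly from an earlier command (see above). */
		n = read(fd, reply, sizeof(reply) - 1);
		if (n > 0) {
			reply[n] = '\0';
			printf("EC replied: %s\n", reply);
		}

		close(fd);
		return 0;
	}
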
diff --git a/Documentation/ABI/testing/sysfs-firmware-acpi b/Documentation/ABI/testing/sysfs-firmware-acpi
index 4f9ba3c2fca7..dd930c8db41f 100644
--- a/Documentation/ABI/testing/sysfs-firmware-acpi
+++ b/Documentation/ABI/testing/sysfs-firmware-acpi
@@ -1,3 +1,23 @@
+What: /sys/firmware/acpi/bgrt/
+Date: January 2012
+Contact: Matthew Garrett <mjg@redhat.com>
+Description:
+ The BGRT is an ACPI 5.0 feature that allows the OS
+ to obtain a copy of the firmware boot splash and
+ some associated metadata. This is intended to be used
+ by boot splash applications in order to interact with
+ the firmware boot splash and avoid jarring
+ transitions.
+
+ image: The image bitmap. Currently a 32-bit BMP.
+ status: 1 if the image is valid, 0 if firmware invalidated it.
+ type: 0 indicates image is in BMP format.
+ version: The version of the BGRT. Currently 1.
+ xoffset: The number of pixels between the left of the screen
+ and the left edge of the image.
+ yoffset: The number of pixels between the top of the screen
+ and the top edge of the image.
+
What: /sys/firmware/acpi/interrupts/
Date: February 2008
Contact: Len Brown <lenb@kernel.org>
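
A short user-space sketch of consuming the bgrt/ entry described above;
it copies the image file to a local BMP and assumes the image is present
and that status reports it as valid:

	#include <stdio.h>

	int main(void)
	{
		FILE *in = fopen("/sys/firmware/acpi/bgrt/image", "rb");
		FILE *out = fopen("bootsplash.bmp", "wb");
		char buf[4096];
		size_t n;

		if (!in || !out) {
			perror("fopen");
			return 1;
		}
		while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
			fwrite(buf, 1, n, out);

		fclose(in);
		fclose(out);
		return 0;
	}
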
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 2b90d328b3ba..c58b236bbe04 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -793,6 +793,35 @@ own custom mode, or may have some other magic method for making indentation
work correctly.
+ Chapter 19: Inline assembly
+
+In architecture-specific code, you may need to use inline assembly to interface
+with CPU or platform functionality. Don't hesitate to do so when necessary.
+However, don't use inline assembly gratuitously when C can do the job. You can
+and should poke hardware from C when possible.
+
+Consider writing simple helper functions that wrap common bits of inline
+assembly, rather than repeatedly writing them with slight variations. Remember
+that inline assembly can use C parameters.
+
+Large, non-trivial assembly functions should go in .S files, with corresponding
+C prototypes defined in C header files. The C prototypes for assembly
+functions should use "asmlinkage".
+
+You may need to mark your asm statement as volatile, to prevent GCC from
+removing it if GCC doesn't notice any side effects. You don't always need to
+do so, though, and doing so unnecessarily can limit optimization.
+
+When writing a single inline assembly statement containing multiple
+instructions, put each instruction on a separate line in a separate quoted
+string, and end each string except the last with \n\t to properly indent the
+next instruction in the assembly output:
+
+ asm ("magic %reg1, #42\n\t"
+ "more_magic %reg2, %reg3"
+ : /* outputs */ : /* inputs */ : /* clobbers */);
+
+
Appendix I: References
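
A hedged illustration of the helper-wrapping advice in the chapter added
above. The helper name and the ARM "mrc" instruction are made-up examples
for this sketch, not code carried by this patch:

	static inline unsigned int read_main_id_reg(void)
	{
		unsigned int val;

		/*
		 * volatile keeps GCC from discarding or merging the read
		 * when it cannot see a side effect; the output operand
		 * carries the value back into C.
		 */
		asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (val));

		return val;
	}
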
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt
index e7cc36397217..e20b6daaced4 100644
--- a/Documentation/acpi/apei/einj.txt
+++ b/Documentation/acpi/apei/einj.txt
@@ -53,6 +53,14 @@ directory apei/einj. The following files are provided.
This file is used to set the second error parameter value. Effect of
parameter depends on error_type specified.
+- notrigger
+ The EINJ mechanism is a two step process. First inject the error, then
+ perform some actions to trigger it. Setting "notrigger" to 1 skips the
+ trigger phase, which *may* allow the user to cause the error in some other
+ context by a simple access to the cpu, memory location, or device that is
+ the target of the error injection. Whether this actually works depends
+ on what operations the BIOS actually includes in the trigger phase.
+
BIOS versions based in the ACPI 4.0 specification have limited options
to control where the errors are injected. Your BIOS may support an
extension (enabled with the param_extension=1 module parameter, or
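
A sketch of the two-step flow with "notrigger" set, following the
description above. The debugfs mount point and the 0x8 (memory
correctable) error type are assumptions; check available_error_type on
the target system before injecting:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void write_str(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0 || write(fd, val, strlen(val)) < 0)
			perror(path);
		if (fd >= 0)
			close(fd);
	}

	int main(void)
	{
		/* 1. Pick the error to inject (error type is assumed). */
		write_str("/sys/kernel/debug/apei/einj/error_type", "0x8");
		/* 2. Skip the trigger phase. */
		write_str("/sys/kernel/debug/apei/einj/notrigger", "1");
		/* 3. Inject; the error fires later, on access to the target. */
		write_str("/sys/kernel/debug/apei/einj/error_inject", "1");
		return 0;
	}
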
diff --git a/Documentation/aoe/aoe.txt b/Documentation/aoe/aoe.txt
index b5aada9f20cc..5f5aa16047ff 100644
--- a/Documentation/aoe/aoe.txt
+++ b/Documentation/aoe/aoe.txt
@@ -35,7 +35,7 @@ CREATING DEVICE NODES
sh Documentation/aoe/mkshelf.sh /dev/etherd 0
There is also an autoload script that shows how to edit
- /etc/modprobe.conf to ensure that the aoe module is loaded when
+ /etc/modprobe.d/aoe.conf to ensure that the aoe module is loaded when
necessary.
USING DEVICE NODES
diff --git a/Documentation/aoe/autoload.sh b/Documentation/aoe/autoload.sh
index 78dad1334c6f..815dff4691c9 100644
--- a/Documentation/aoe/autoload.sh
+++ b/Documentation/aoe/autoload.sh
@@ -1,8 +1,8 @@
#!/bin/sh
# set aoe to autoload by installing the
-# aliases in /etc/modprobe.conf
+# aliases in /etc/modprobe.d/
-f=/etc/modprobe.conf
+f=/etc/modprobe.d/aoe.conf
if test ! -r $f || test ! -w $f; then
echo "cannot configure $f for module autoloading" 1>&2
diff --git a/Documentation/blockdev/floppy.txt b/Documentation/blockdev/floppy.txt
index 6ccab88705cb..470fe4b5e379 100644
--- a/Documentation/blockdev/floppy.txt
+++ b/Documentation/blockdev/floppy.txt
@@ -49,7 +49,7 @@ you can put:
options floppy omnibook messages
-in /etc/modprobe.conf.
+in a configuration file in /etc/modprobe.d/.
The floppy driver related options are:
diff --git a/Documentation/cpuidle/sysfs.txt b/Documentation/cpuidle/sysfs.txt
index 50d7b1642759..9d28a3406e74 100644
--- a/Documentation/cpuidle/sysfs.txt
+++ b/Documentation/cpuidle/sysfs.txt
@@ -36,6 +36,7 @@ drwxr-xr-x 2 root root 0 Feb 8 10:42 state3
/sys/devices/system/cpu/cpu0/cpuidle/state0:
total 0
-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
+-rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
-r--r--r-- 1 root root 4096 Feb 8 10:42 name
-r--r--r-- 1 root root 4096 Feb 8 10:42 power
@@ -45,6 +46,7 @@ total 0
/sys/devices/system/cpu/cpu0/cpuidle/state1:
total 0
-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
+-rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
-r--r--r-- 1 root root 4096 Feb 8 10:42 name
-r--r--r-- 1 root root 4096 Feb 8 10:42 power
@@ -54,6 +56,7 @@ total 0
/sys/devices/system/cpu/cpu0/cpuidle/state2:
total 0
-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
+-rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
-r--r--r-- 1 root root 4096 Feb 8 10:42 name
-r--r--r-- 1 root root 4096 Feb 8 10:42 power
@@ -63,6 +66,7 @@ total 0
/sys/devices/system/cpu/cpu0/cpuidle/state3:
total 0
-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
+-rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
-r--r--r-- 1 root root 4096 Feb 8 10:42 name
-r--r--r-- 1 root root 4096 Feb 8 10:42 power
@@ -72,6 +76,7 @@ total 0
* desc : Small description about the idle state (string)
+* disable : Option to disable this idle state (bool)
* latency : Latency to exit out of this idle state (in microseconds)
* name : Name of the idle state (string)
* power : Power consumed while in this idle state (in milliwatts)
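
A minimal sketch of using the per-state "disable" file listed above,
assuming cpu0 exposes a state3 directory as in the listing; writing "1"
keeps the governor out of that state and writing "0" re-enables it:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Path assumed from the directory listing above. */
		const char *path =
			"/sys/devices/system/cpu/cpu0/cpuidle/state3/disable";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror(path);
			return 1;
		}
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}
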
diff --git a/Documentation/devicetree/bindings/mtd/arm-versatile.txt b/Documentation/devicetree/bindings/mtd/arm-versatile.txt
index 476845db94d0..beace4b89daa 100644
--- a/Documentation/devicetree/bindings/mtd/arm-versatile.txt
+++ b/Documentation/devicetree/bindings/mtd/arm-versatile.txt
@@ -4,5 +4,5 @@ Required properties:
- compatible : must be "arm,versatile-flash";
- bank-width : width in bytes of flash interface.
-Optional properties:
-- Subnode partition map from mtd flash binding
+The device tree may optionally contain sub-nodes describing partitions of the
+address space. See partition.txt for more detail.
diff --git a/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt b/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
index ef66ddd01da0..1889a4db5b7c 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
@@ -3,6 +3,9 @@
Required properties:
- compatible : "atmel,<model>", "atmel,<series>", "atmel,dataflash".
+The device tree may optionally contain sub-nodes describing partitions of the
+address space. See partition.txt for more detail.
+
Example:
flash@1 {
diff --git a/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt b/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
index 00f1f546b32e..fce4894f5a98 100644
--- a/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
@@ -19,6 +19,10 @@ Optional properties:
read registers (tR). Required if property "gpios" is not used
(R/B# pins not connected).
+Each flash chip described may optionally contain additional sub-nodes
+describing partitions of the address space. See partition.txt for more
+detail.
+
Examples:
upm@1,0 {
diff --git a/Documentation/devicetree/bindings/mtd/fsmc-nand.txt b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
new file mode 100644
index 000000000000..e2c663b354d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
@@ -0,0 +1,33 @@
+* FSMC NAND
+
+Required properties:
+- compatible : "st,spear600-fsmc-nand"
+- reg : Address range of the mtd chip
+- reg-names: Should contain the reg names "fsmc_regs" and "nand_data"
+- st,ale-off : Chip specific offset to ALE
+- st,cle-off : Chip specific offset to CLE
+
+Optional properties:
+- bank-width : Width (in bytes) of the device. If not present, the width
+ defaults to 1 byte
+- nand-skip-bbtscan: Indicates that the BBT scanning should be skipped
+
+Example:
+
+ fsmc: flash@d1800000 {
+ compatible = "st,spear600-fsmc-nand";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xd1800000 0x1000 /* FSMC Register */
+ 0xd2000000 0x4000>; /* NAND Base */
+ reg-names = "fsmc_regs", "nand_data";
+ st,ale-off = <0x20000>;
+ st,cle-off = <0x10000>;
+
+ bank-width = <1>;
+ nand-skip-bbtscan;
+
+ partition@0 {
+ ...
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
index 719f4dc58df7..36ef07d3c90f 100644
--- a/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
@@ -25,6 +25,9 @@ Optional properties:
GPIO state and before and after command byte writes, this register will be
read to ensure that the GPIO accesses have completed.
+The device tree may optionally contain sub-nodes describing partitions of the
+address space. See partition.txt for more detail.
+
Examples:
gpio-nand@1,0 {
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
index 80152cb567d9..a63c2bd7de2b 100644
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
@@ -23,27 +23,8 @@ are defined:
- vendor-id : Contains the flash chip's vendor id (1 byte).
- device-id : Contains the flash chip's device id (1 byte).
-In addition to the information on the mtd bank itself, the
-device tree may optionally contain additional information
-describing partitions of the address space. This can be
-used on platforms which have strong conventions about which
-portions of a flash are used for what purposes, but which don't
-use an on-flash partition table such as RedBoot.
-
-Each partition is represented as a sub-node of the mtd device.
-Each node's name represents the name of the corresponding
-partition of the mtd device.
-
-Flash partitions
- - reg : The partition's offset and size within the mtd bank.
- - label : (optional) The label / name for this partition.
- If omitted, the label is taken from the node name (excluding
- the unit address).
- - read-only : (optional) This parameter, if present, is a hint to
- Linux that this partition should only be mounted
- read-only. This is usually used for flash partitions
- containing early-boot firmware images or data which should not
- be clobbered.
+The device tree may optionally contain sub-nodes describing partitions of the
+address space. See partition.txt for more detail.
Example:
diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt
new file mode 100644
index 000000000000..f114ce1657c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partition.txt
@@ -0,0 +1,38 @@
+Representing flash partitions in devicetree
+
+Partitions can be represented by sub-nodes of an mtd device. This can be used
+on platforms which have strong conventions about which portions of a flash are
+used for what purposes, but which don't use an on-flash partition table such
+as RedBoot.
+
+#address-cells & #size-cells must both be present in the mtd device and be
+equal to 1.
+
+Required properties:
+- reg : The partition's offset and size within the mtd bank.
+
+Optional properties:
+- label : The label / name for this partition. If omitted, the label is taken
+ from the node name (excluding the unit address).
+- read-only : This parameter, if present, is a hint to Linux that this
+ partition should only be mounted read-only. This is usually used for flash
+ partitions containing early-boot firmware images or data which should not be
+ clobbered.
+
+Examples:
+
+
+flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x100000>;
+ read-only;
+ };
+
+ uimage@100000 {
+ reg = <0x0100000 0x200000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/spear_smi.txt b/Documentation/devicetree/bindings/mtd/spear_smi.txt
new file mode 100644
index 000000000000..7248aadd89e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/spear_smi.txt
@@ -0,0 +1,31 @@
+* SPEAr SMI
+
+Required properties:
+- compatible : "st,spear600-smi"
+- reg : Address range of the mtd chip
+- #address-cells, #size-cells : Must be present if the device has sub-nodes
+ representing partitions.
+- interrupt-parent: Should be the phandle for the interrupt controller
+ that services interrupts for this device
+- interrupts: Should contain the SMI interrupt
+- clock-rate : Functional clock rate of SMI in Hz
+
+Optional properties:
+- st,smi-fast-mode : Flash supports read in fast mode
+
+Example:
+
+ smi: flash@fc000000 {
+ compatible = "st,spear600-smi";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xfc000000 0x1000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <12>;
+ clock-rate = <50000000>; /* 50MHz */
+
+ flash@f8000000 {
+ st,smi-fast-mode;
+ ...
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power_supply/max17042_battery.txt b/Documentation/devicetree/bindings/power_supply/max17042_battery.txt
new file mode 100644
index 000000000000..5bc9b685cf8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/max17042_battery.txt
@@ -0,0 +1,18 @@
+max17042_battery
+~~~~~~~~~~~~~~~~
+
+Required properties :
+ - compatible : "maxim,max17042"
+
+Optional properties :
+ - maxim,rsns-microohm : Resistance of rsns resistor in micro Ohms
+ (datasheet-recommended value is 10000).
+ Defining this property enables current-sense functionality.
+
+Example:
+
+ battery-charger@36 {
+ compatible = "maxim,max17042";
+ reg = <0x36>;
+ maxim,rsns-microohm = <10000>;
+ };
diff --git a/Documentation/devicetree/usage-model.txt b/Documentation/devicetree/usage-model.txt
new file mode 100644
index 000000000000..c5a80099b71c
--- /dev/null
+++ b/Documentation/devicetree/usage-model.txt
@@ -0,0 +1,412 @@
+Linux and the Device Tree
+-------------------------
+The Linux usage model for device tree data
+
+Author: Grant Likely <grant.likely@secretlab.ca>
+
+This article describes how Linux uses the device tree. An overview of
+the device tree data format can be found on the device tree usage page
+at devicetree.org[1].
+
+[1] http://devicetree.org/Device_Tree_Usage
+
+The "Open Firmware Device Tree", or simply Device Tree (DT), is a data
+structure and language for describing hardware. More specifically, it
+is a description of hardware that is readable by an operating system
+so that the operating system doesn't need to hard code details of the
+machine.
+
+Structurally, the DT is a tree, or acyclic graph with named nodes, and
+nodes may have an arbitrary number of named properties encapsulating
+arbitrary data. A mechanism also exists to create arbitrary
+links from one node to another outside of the natural tree structure.
+
+Conceptually, a common set of usage conventions, called 'bindings',
+is defined for how data should appear in the tree to describe typical
+hardware characteristics including data busses, interrupt lines, GPIO
+connections, and peripheral devices.
+
+As much as possible, hardware is described using existing bindings to
+maximize use of existing support code, but since property and node
+names are simply text strings, it is easy to extend existing bindings
+or create new ones by defining new nodes and properties. Be wary,
+however, of creating a new binding without first doing some homework
+about what already exists. There are currently two different,
+incompatible, bindings for i2c busses that came about because the new
+binding was created without first investigating how i2c devices were
+already being enumerated in existing systems.
+
+1. History
+----------
+The DT was originally created by Open Firmware as part of the
+communication method for passing data from Open Firmware to a client
+program (like to an operating system). An operating system used the
+Device Tree to discover the topology of the hardware at runtime, and
+thereby support a majority of available hardware without hard coded
+information (assuming drivers were available for all devices).
+
+Since Open Firmware is commonly used on PowerPC and SPARC platforms,
+the Linux support for those architectures has for a long time used the
+Device Tree.
+
+In 2005, when PowerPC Linux began a major cleanup and the merge of 32-bit
+and 64-bit support, the decision was made to require DT support on all
+powerpc platforms, regardless of whether or not they used Open
+Firmware. To do this, a DT representation called the Flattened Device
+Tree (FDT) was created which could be passed to the kernel as a binary
+blob without requiring a real Open Firmware implementation. U-Boot,
+kexec, and other bootloaders were modified to support both passing a
+Device Tree Binary (dtb) and to modify a dtb at boot time. DT was
+also added to the PowerPC boot wrapper (arch/powerpc/boot/*) so that
+a dtb could be wrapped up with the kernel image to support booting
+existing non-DT aware firmware.
+
+Some time later, FDT infrastructure was generalized to be usable by
+all architectures. At the time of this writing, 6 mainlined
+architectures (arm, microblaze, mips, powerpc, sparc, and x86) and 1
+out of mainline (nios) have some level of DT support.
+
+2. Data Model
+-------------
+If you haven't already read the Device Tree Usage[1] page,
+then go read it now. It's okay, I'll wait....
+
+2.1 High Level View
+-------------------
+The most important thing to understand is that the DT is simply a data
+structure that describes the hardware. There is nothing magical about
+it, and it doesn't magically make all hardware configuration problems
+go away. What it does do is provide a language for decoupling the
+hardware configuration from the board and device driver support in the
+Linux kernel (or any other operating system for that matter). Using
+it allows board and device support to become data driven; to make
+setup decisions based on data passed into the kernel instead of on
+per-machine hard coded selections.
+
+Ideally, data driven platform setup should result in less code
+duplication and make it easier to support a wide range of hardware
+with a single kernel image.
+
+Linux uses DT data for three major purposes:
+1) platform identification,
+2) runtime configuration, and
+3) device population.
+
+2.2 Platform Identification
+---------------------------
+First and foremost, the kernel will use data in the DT to identify the
+specific machine. In a perfect world, the specific platform shouldn't
+matter to the kernel because all platform details would be described
+perfectly by the device tree in a consistent and reliable manner.
+Hardware is not perfect though, and so the kernel must identify the
+machine during early boot so that it has the opportunity to run
+machine-specific fixups.
+
+In the majority of cases, the machine identity is irrelevant, and the
+kernel will instead select setup code based on the machine's core
+CPU or SoC. On ARM for example, setup_arch() in
+arch/arm/kernel/setup.c will call setup_machine_fdt() in
+arch/arm/kernel/devicetree.c which searches through the machine_desc
+table and selects the machine_desc which best matches the device tree
+data. It determines the best match by looking at the 'compatible'
+property in the root device tree node, and comparing it with the
+dt_compat list in struct machine_desc.
+
+The 'compatible' property contains a sorted list of strings starting
+with the exact name of the machine, followed by an optional list of
+boards it is compatible with sorted from most compatible to least. For
+example, the root compatible properties for the TI BeagleBoard and its
+successor, the BeagleBoard xM board might look like:
+
+ compatible = "ti,omap3-beagleboard", "ti,omap3450", "ti,omap3";
+ compatible = "ti,omap3-beagleboard-xm", "ti,omap3450", "ti,omap3";
+
+Where "ti,omap3-beagleboard-xm" specifies the exact model, it also
+claims that it compatible with the OMAP 3450 SoC, and the omap3 family
+of SoCs in general. You'll notice that the list is sorted from most
+specific (exact board) to least specific (SoC family).
+
+Astute readers might point out that the Beagle xM could also claim
+compatibility with the original Beagle board. However, one should be
+cautioned about doing so at the board level since there is typically a
+high level of change from one board to another, even within the same
+product line, and it is hard to nail down exactly what is meant when one
+board claims to be compatible with another. For the top level, it is
+better to err on the side of caution and not claim one board is
+compatible with another. The notable exception would be when one
+board is a carrier for another, such as a CPU module attached to a
+carrier board.
+
+One more note on compatible values. Any string used in a compatible
+property must be documented as to what it indicates. Add
+documentation for compatible strings in Documentation/devicetree/bindings.
+
+Again on ARM, for each machine_desc, the kernel looks to see if
+any of the dt_compat list entries appear in the compatible property.
+If one does, then that machine_desc is a candidate for driving the
+machine. After searching the entire table of machine_descs,
+setup_machine_fdt() returns the 'most compatible' machine_desc based
+on which entry in the compatible property each machine_desc matches
+against. If no matching machine_desc is found, then it returns NULL.
+
+The reasoning behind this scheme is the observation that in the majority
+of cases, a single machine_desc can support a large number of boards
+if they all use the same SoC, or same family of SoCs. However,
+invariably there will be some exceptions where a specific board will
+require special setup code that is not useful in the generic case.
+Special cases could be handled by explicitly checking for the
+troublesome board(s) in generic setup code, but doing so very quickly
+becomes ugly and/or unmaintainable if it is more than just a couple of
+cases.
+
+Instead, the compatible list allows a generic machine_desc to provide
+support for a wide common set of boards by specifying "less
+compatible" value in the dt_compat list. In the example above,
+generic board support can claim compatibility with "ti,omap3" or
+"ti,omap3450". If a bug was discovered on the original beagleboard
+that required special workaround code during early boot, then a new
+machine_desc could be added which implements the workarounds and only
+matches on "ti,omap3-beagleboard".
+
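+As a purely illustrative sketch (the identifiers below are invented for
+this document rather than copied from a real board file), an ARM
+machine_desc claiming generic support for the whole "ti,omap3" family
+might look roughly like this:
+
+#include <linux/init.h>
+#include <asm/mach/arch.h>
+
+/* NULL-terminated list of root 'compatible' values this machine_desc
+ * is willing to drive. */
+static const char *example_omap3_compat[] __initdata = {
+	"ti,omap3",
+	NULL,
+};
+
+DT_MACHINE_START(EXAMPLE_OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
+	/* .map_io, .init_irq, .init_machine and friends go here */
+	.dt_compat	= example_omap3_compat,
+MACHINE_END
+
+The hypothetical beagleboard workaround described above would then just
+be a second machine_desc whose dt_compat list contains only
+"ti,omap3-beagleboard".
+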
+PowerPC uses a slightly different scheme where it calls the .probe()
+hook from each machine_desc, and the first one returning TRUE is used.
+However, this approach does not take into account the priority of the
+compatible list, and probably should be avoided for new architecture
+support.
+
+2.3 Runtime configuration
+-------------------------
+In most cases, a DT will be the sole method of communicating data from
+firmware to the kernel, so also gets used to pass in runtime and
+configuration data like the kernel parameters string and the location
+of an initrd image.
+
+Most of this data is contained in the /chosen node, and when booting
+Linux it will look something like this:
+
+ chosen {
+ bootargs = "console=ttyS0,115200 loglevel=8";
+ initrd-start = <0xc8000000>;
+ initrd-end = <0xc8200000>;
+ };
+
+The bootargs property contains the kernel arguments, and the initrd-*
+properties define the address and size of an initrd blob. The
+chosen node may also optionally contain an arbitrary number of
+additional properties for platform-specific configuration data.
+
+During early boot, the architecture setup code calls of_scan_flat_dt()
+several times with different helper callbacks to parse device tree
+data before paging is set up. The of_scan_flat_dt() code scans through
+the device tree and uses the helpers to extract information required
+during early boot. Typically the early_init_dt_scan_chosen() helper
+is used to parse the chosen node including kernel parameters,
+early_init_dt_scan_root() to initialize the DT address space model,
+and early_init_dt_scan_memory() to determine the size and
+location of usable RAM.
+
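+A condensed sketch of that early scanning (the wrapper function below is
+invented for this document; the helper callbacks are the generic ones
+from drivers/of/fdt.c) might look like:
+
+#include <linux/init.h>
+#include <linux/of_fdt.h>
+
+static void __init example_early_dt_scan(void)
+{
+	/* /chosen: kernel command line and initrd-start/initrd-end */
+	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+	/* root node: #address-cells and #size-cells */
+	of_scan_flat_dt(early_init_dt_scan_root, NULL);
+	/* memory node(s): size and location of usable RAM */
+	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+}
+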
+On ARM, the function setup_machine_fdt() is responsible for early
+scanning of the device tree after selecting the correct machine_desc
+that supports the board.
+
+2.4 Device population
+---------------------
+After the board has been identified, and after the early configuration data
+has been parsed, then kernel initialization can proceed in the normal
+way. At some point in this process, unflatten_device_tree() is called
+to convert the data into a more efficient runtime representation.
+This is also when machine-specific setup hooks will get called, like
+the machine_desc .init_early(), .init_irq() and .init_machine() hooks
+on ARM. The remainder of this section uses examples from the ARM
+implementation, but all architectures will do pretty much the same
+thing when using a DT.
+
+As can be guessed by the names, .init_early() is used for any machine-
+specific setup that needs to be executed early in the boot process,
+and .init_irq() is used to set up interrupt handling. Using a DT
+doesn't materially change the behaviour of either of these functions.
+If a DT is provided, then both .init_early() and .init_irq() are able
+to call any of the DT query functions (of_* in include/linux/of*.h) to
+get additional data about the platform.
+
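+For instance, an .init_irq() hook could locate its interrupt controller
+node and map its registers using those helpers.  This is only a rough
+sketch; the function name is made up and the compatible string is
+borrowed from the Tegra example further below:
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+static void __init example_init_irq(void)
+{
+	struct device_node *np;
+	void __iomem *base;
+
+	/* Find the controller by its 'compatible' value... */
+	np = of_find_compatible_node(NULL, NULL, "nvidia,tegra20-gic");
+	if (!np)
+		return;
+
+	/* ...and map the registers described by its 'reg' property. */
+	base = of_iomap(np, 0);
+	of_node_put(np);
+	if (!base)
+		return;
+
+	/* controller-specific setup would continue from here */
+}
+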
+The most interesting hook in the DT context is .init_machine() which
+is primarily responsible for populating the Linux device model with
+data about the platform. Historically this has been implemented on
+embedded platforms by defining a set of static clock structures,
+platform_devices, and other data in the board support .c file, and
+registering it en-masse in .init_machine(). When DT is used, then
+instead of hard coding static devices for each platform, the list of
+devices can be obtained by parsing the DT, and allocating device
+structures dynamically.
+
+The simplest case is when .init_machine() is only responsible for
+registering a block of platform_devices. A platform_device is a concept
+used by Linux for memory or I/O mapped devices which cannot be detected
+by hardware, and for 'composite' or 'virtual' devices (more on those
+later). While there is no 'platform device' terminology for the DT,
+platform devices roughly correspond to device nodes at the root of the
+tree and children of simple memory mapped bus nodes.
+
+About now is a good time to lay out an example. Here is part of the
+device tree for the NVIDIA Tegra board.
+
+/{
+ compatible = "nvidia,harmony", "nvidia,tegra20";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+
+ chosen { };
+ aliases { };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x40000000>;
+ };
+
+ soc {
+ compatible = "nvidia,tegra20-soc", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ intc: interrupt-controller@50041000 {
+ compatible = "nvidia,tegra20-gic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x50041000 0x1000>, < 0x50040100 0x0100 >;
+ };
+
+ serial@70006300 {
+ compatible = "nvidia,tegra20-uart";
+ reg = <0x70006300 0x100>;
+ interrupts = <122>;
+ };
+
+ i2s1: i2s@70002800 {
+ compatible = "nvidia,tegra20-i2s";
+ reg = <0x70002800 0x100>;
+ interrupts = <77>;
+ codec = <&wm8903>;
+ };
+
+ i2c@7000c000 {
+ compatible = "nvidia,tegra20-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x7000c000 0x100>;
+ interrupts = <70>;
+
+ wm8903: codec@1a {
+ compatible = "wlf,wm8903";
+ reg = <0x1a>;
+ interrupts = <347>;
+ };
+ };
+ };
+
+ sound {
+ compatible = "nvidia,harmony-sound";
+ i2s-controller = <&i2s1>;
+ i2s-codec = <&wm8903>;
+ };
+};
+
+At .init_machine() time, Tegra board support code will need to look at
+this DT and decide which nodes to create platform_devices for.
+However, looking at the tree, it is not immediately obvious what kind
+of device each node represents, or even if a node represents a device
+at all. The /chosen, /aliases, and /memory nodes are informational
+nodes that don't describe devices (although arguably memory could be
+considered a device). The children of the /soc node are memory mapped
+devices, but the codec@1a is an i2c device, and the sound node
+represents not a device, but rather how other devices are connected
+together to create the audio subsystem. I know what each device is
+because I'm familiar with the board design, but how does the kernel
+know what to do with each node?
+
+The trick is that the kernel starts at the root of the tree and looks
+for nodes that have a 'compatible' property. First, it is generally
+assumed that any node with a 'compatible' property represents a device
+of some kind, and second, it can be assumed that any node at the root
+of the tree is either directly attached to the processor bus, or is a
+miscellaneous system device that cannot be described any other way.
+For each of these nodes, Linux allocates and registers a
+platform_device, which in turn may get bound to a platform_driver.
+
+Why is using a platform_device for these nodes a safe assumption?
+Well, for the way that Linux models devices, just about all bus_types
+assume that their devices are children of a bus controller. For
+example, each i2c_client is a child of an i2c_master. Each spi_device
+is a child of an SPI bus. Similarly for USB, PCI, MDIO, etc. The
+same hierarchy is also found in the DT, where I2C device nodes only
+ever appear as children of an I2C bus node. Ditto for SPI, MDIO, USB,
+etc. The only devices which do not require a specific type of parent
+device are platform_devices (and amba_devices, but more on that
+later), which will happily live at the base of the Linux /sys/devices
+tree. Therefore, if a DT node is at the root of the tree, then it
+really probably is best registered as a platform_device.
+
+Linux board support code calls of_platform_populate(NULL, NULL, NULL, NULL)
+to kick off discovery of devices at the root of the tree. The
+parameters are all NULL because when starting from the root of the
+tree, there is no need to provide a starting node (the first NULL), a
+parent struct device (the last NULL), and we're not using a match
+table (yet). For a board that only needs to register devices,
+.init_machine() can be completely empty except for the
+of_platform_populate() call.
+
+In the Tegra example, this accounts for the /soc and /sound nodes, but
+what about the children of the SoC node? Shouldn't they be registered
+as platform devices too? For Linux DT support, the generic behaviour
+is for child devices to be registered by the parent's device driver at
+driver .probe() time. So, an i2c bus device driver will register an
+i2c_client for each child node, an SPI bus driver will register
+its spi_device children, and similarly for other bus_types.
+According to that model, a driver could be written that binds to the
+SoC node and simply registers platform_devices for each of its
+children. The board support code would allocate and register an SoC
+device, a (theoretical) SoC device driver could bind to the SoC device,
+and register platform_devices for /soc/interrupt-controller, /soc/serial,
+/soc/i2s, and /soc/i2c in its .probe() hook. Easy, right?
+
+Actually, it turns out that registering children of some
+platform_devices as more platform_devices is a common pattern, and the
+device tree support code reflects that and makes the above example
+simpler. The second argument to of_platform_populate() is an
+of_device_id table, and any node that matches an entry in that table
+will also get its child nodes registered. In the tegra case, the code
+can look something like this:
+
+static void __init harmony_init_machine(void)
+{
+ /* ... */
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
+
+"simple-bus" is defined in the ePAPR 1.0 specification as a property
+meaning a simple memory mapped bus, so the of_platform_populate() code
+could be written to just assume simple-bus compatible nodes will
+always be traversed. However, we pass it in as an argument so that
+board support code can always override the default behaviour.
+
+[Need to add discussion of adding i2c/spi/etc child devices]
+
+Appendix A: AMBA devices
+------------------------
+
+ARM Primecells are a certain kind of device attached to the ARM AMBA
+bus which include some support for hardware detection and power
+management. In Linux, struct amba_device and the amba_bus_type is
+used to represent Primecell devices. However, the fiddly bit is that
+not all devices on an AMBA bus are Primecells, and for Linux it is
+typical for both amba_device and platform_device instances to be
+siblings of the same bus segment.
+
+When using the DT, this creates problems for of_platform_populate()
+because it must decide whether to register each node as either a
+platform_device or an amba_device. This unfortunately complicates the
+device creation model a little bit, but the solution turns out not to
+be too invasive. If a node is compatible with "arm,primecell", then
+of_platform_populate() will register it as an amba_device instead of a
+platform_device.
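+
+A simplified sketch of that decision (this mirrors logic that lives
+inside drivers/of/platform.c and is not a public API to call):
+
+#include <linux/types.h>
+#include <linux/of.h>
+
+static bool example_node_is_primecell(struct device_node *np)
+{
+	/* Primecell nodes carry "arm,primecell" in their compatible
+	 * list; other root-level nodes become platform_devices. */
+	return of_device_is_compatible(np, "arm,primecell");
+}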
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 0c083c5c2faa..b4a898f43c37 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -158,7 +158,6 @@ logo_*.c
logo_*_clut224.c
logo_*_mono.c
lxdialog
-mach
mach-types
mach-types.h
machtypes.h
diff --git a/Documentation/fb/intel810.txt b/Documentation/fb/intel810.txt
index be3e7836abef..a8e9f5bca6f3 100644
--- a/Documentation/fb/intel810.txt
+++ b/Documentation/fb/intel810.txt
@@ -211,7 +211,7 @@ Using the same setup as described above, load the module like this:
modprobe i810fb vram=2 xres=1024 bpp=8 hsync1=30 hsync2=55 vsync1=50 \
vsync2=85 accel=1 mtrr=1
-Or just add the following to /etc/modprobe.conf
+Or just add the following to a configuration file in /etc/modprobe.d/
options i810fb vram=2 xres=1024 bpp=16 hsync1=30 hsync2=55 vsync1=50 \
vsync2=85 accel=1 mtrr=1
diff --git a/Documentation/fb/intelfb.txt b/Documentation/fb/intelfb.txt
index dd9e944ea628..feac4e4d6968 100644
--- a/Documentation/fb/intelfb.txt
+++ b/Documentation/fb/intelfb.txt
@@ -120,7 +120,7 @@ Using the same setup as described above, load the module like this:
modprobe intelfb mode=800x600-32@75 vram=8 accel=1 hwcursor=1
-Or just add the following to /etc/modprobe.conf
+Or just add the following to a configuration file in /etc/modprobe.d/
options intelfb mode=800x600-32@75 vram=8 accel=1 hwcursor=1
diff --git a/Documentation/filesystems/files.txt b/Documentation/filesystems/files.txt
index ac2facc50d2a..46dfc6b038c3 100644
--- a/Documentation/filesystems/files.txt
+++ b/Documentation/filesystems/files.txt
@@ -113,8 +113,8 @@ the fdtable structure -
if (fd >= 0) {
/* locate_fd() may have expanded fdtable, load the ptr */
fdt = files_fdtable(files);
- FD_SET(fd, fdt->open_fds);
- FD_CLR(fd, fdt->close_on_exec);
+ __set_open_fd(fd, fdt);
+ __clear_close_on_exec(fd, fdt);
spin_unlock(&files->file_lock);
.....
diff --git a/Documentation/i2c/busses/scx200_acb b/Documentation/i2c/busses/scx200_acb
index 7c07883d4dfc..ce83c871fe95 100644
--- a/Documentation/i2c/busses/scx200_acb
+++ b/Documentation/i2c/busses/scx200_acb
@@ -28,5 +28,5 @@ If the scx200_acb driver is built into the kernel, add the following
parameter to your boot command line:
scx200_acb.base=0x810,0x820
If the scx200_acb driver is built as a module, add the following line to
-the file /etc/modprobe.conf instead:
+a configuration file in /etc/modprobe.d/ instead:
options scx200_acb base=0x810,0x820
diff --git a/Documentation/ide/ide.txt b/Documentation/ide/ide.txt
index e77bebfa7b0d..7aca987c23d9 100644
--- a/Documentation/ide/ide.txt
+++ b/Documentation/ide/ide.txt
@@ -169,7 +169,7 @@ When using ide.c as a module in combination with kmod, add:
alias block-major-3 ide-probe
-to /etc/modprobe.conf.
+to a configuration file in /etc/modprobe.d/.
When ide.c is used as a module, you can pass command line parameters to the
driver using the "options=" keyword to insmod, while replacing any ',' with
diff --git a/Documentation/input/input.txt b/Documentation/input/input.txt
index b3d6787b4fb1..666c06c5ab0c 100644
--- a/Documentation/input/input.txt
+++ b/Documentation/input/input.txt
@@ -250,8 +250,8 @@ And so on up to event31.
a USB keyboard works and is correctly connected to the kernel keyboard
driver.
- Doing a cat /dev/input/mouse0 (c, 13, 32) will verify that a mouse
-is also emulated, characters should appear if you move it.
+ Doing a "cat /dev/input/mouse0" (c, 13, 32) will verify that a mouse
+is also emulated; characters should appear if you move it.
You can test the joystick emulation with the 'jstest' utility,
available in the joystick package (see Documentation/input/joystick.txt).
diff --git a/Documentation/isdn/README.gigaset b/Documentation/isdn/README.gigaset
index ef3343eaa002..7534c6039adc 100644
--- a/Documentation/isdn/README.gigaset
+++ b/Documentation/isdn/README.gigaset
@@ -97,8 +97,7 @@ GigaSet 307x Device Driver
2.5.): 1=on (default), 0=off
Depending on your distribution you may want to create a separate module
- configuration file /etc/modprobe.d/gigaset for these, or add them to a
- custom file like /etc/modprobe.conf.local.
+ configuration file like /etc/modprobe.d/gigaset.conf for these.
2.2. Device nodes for user space programs
------------------------------------
@@ -212,8 +211,8 @@ GigaSet 307x Device Driver
options ppp_async flag_time=0
- to an appropriate module configuration file, like /etc/modprobe.d/gigaset
- or /etc/modprobe.conf.local.
+ to an appropriate module configuration file, like
+ /etc/modprobe.d/gigaset.conf.
Unimodem mode is needed for making some devices [e.g. SX100] work which
do not support the regular Gigaset command set. If debug output (see
@@ -237,8 +236,8 @@ GigaSet 307x Device Driver
modprobe usb_gigaset startmode=0
or by adding a line like
options usb_gigaset startmode=0
- to an appropriate module configuration file, like /etc/modprobe.d/gigaset
- or /etc/modprobe.conf.local.
+ to an appropriate module configuration file, like
+ /etc/modprobe.d/gigaset.conf.
2.6. Call-ID (CID) mode
------------------
@@ -310,7 +309,7 @@ GigaSet 307x Device Driver
options isdn dialtimeout=15
- to /etc/modprobe.d/gigaset, /etc/modprobe.conf.local or a similar file.
+ to /etc/modprobe.d/gigaset.conf or a similar file.
Problem:
The isdnlog program emits error messages or just doesn't work.
@@ -350,8 +349,7 @@ GigaSet 307x Device Driver
The initial value can be set using the debug parameter when loading the
module "gigaset", e.g. by adding a line
options gigaset debug=0
- to your module configuration file, eg. /etc/modprobe.d/gigaset or
- /etc/modprobe.conf.local.
+ to your module configuration file, e.g. /etc/modprobe.d/gigaset.conf.
Generated debugging information can be found
- as output of the command
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index c313d71324b4..9d5f2a90dca9 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -28,12 +28,10 @@ new (default) values, so you can use:
grep "(NEW)" conf.new
-to see the new config symbols or you can 'diff' the previous and
-new .config files to see the differences:
+to see the new config symbols or you can use diffconfig to see the
+differences between the previous and new .config files:
- diff .config.old .config | less
-
-(Yes, we need something better here.)
+ scripts/diffconfig .config.old .config | less
______________________________________________________________________
Environment variables for '*config'
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e2f8c297a8a4..c1601e5a8b71 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1699,6 +1699,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
The default is to send the implementation identification
information.
+ nfsd.nfs4_disable_idmapping=
+ [NFSv4] When set to the default of '1', the NFSv4
+ server will return only numeric uids and gids to
+ clients using auth_sys, and will accept numeric uids
+ and gids from such clients. This is intended to ease
+ migration from NFSv2/v3.
objlayoutdriver.osd_login_prog=
[NFS] [OBJLAYOUT] sets the pathname to the program which
diff --git a/Documentation/laptops/sonypi.txt b/Documentation/laptops/sonypi.txt
index 4857acfc50f1..606bdb9ce036 100644
--- a/Documentation/laptops/sonypi.txt
+++ b/Documentation/laptops/sonypi.txt
@@ -110,7 +110,7 @@ Module use:
-----------
In order to automatically load the sonypi module on use, you can put those
-lines in your /etc/modprobe.conf file:
+lines in a configuration file in /etc/modprobe.d/:
alias char-major-10-250 sonypi
options sonypi minor=250
diff --git a/Documentation/mono.txt b/Documentation/mono.txt
index e8e1758e87da..d01ac6052194 100644
--- a/Documentation/mono.txt
+++ b/Documentation/mono.txt
@@ -38,11 +38,11 @@ if [ ! -e /proc/sys/fs/binfmt_misc/register ]; then
/sbin/modprobe binfmt_misc
# Some distributions, like Fedora Core, perform
# the following command automatically when the
- # binfmt_misc module is loaded into the kernel.
+ # binfmt_misc module is loaded into the kernel
+ # or during normal boot up (systemd-based systems).
# Thus, it is possible that the following line
- # is not needed at all. Look at /etc/modprobe.conf
- # to check whether this is applicable or not.
- mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
+ # is not needed at all.
+ mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
fi
# Register support for .NET CLR binaries
diff --git a/Documentation/networking/baycom.txt b/Documentation/networking/baycom.txt
index 4e68849d5639..688f18fd4467 100644
--- a/Documentation/networking/baycom.txt
+++ b/Documentation/networking/baycom.txt
@@ -93,7 +93,7 @@ Every time a driver is inserted into the kernel, it has to know which
modems it should access at which ports. This can be done with the setbaycom
utility. If you are only using one modem, you can also configure the
driver from the insmod command line (or by means of an option line in
-/etc/modprobe.conf).
+/etc/modprobe.d/*.conf).
Examples:
modprobe baycom_ser_fdx mode="ser12*" iobase=0x3f8 irq=4
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 080ad26690ae..bfea8a338901 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -173,9 +173,8 @@ bonding module at load time, or are specified via sysfs.
Module options may be given as command line arguments to the
insmod or modprobe command, but are usually specified in either the
-/etc/modules.conf or /etc/modprobe.conf configuration file, or in a
-distro-specific configuration file (some of which are detailed in the next
-section).
+/etc/modprobe.d/*.conf configuration files, or in a distro-specific
+configuration file (some of which are detailed in the next section).
Details on bonding support for sysfs is provided in the
"Configuring Bonding Manually via Sysfs" section, below.
@@ -1021,7 +1020,7 @@ ifcfg-bondX files.
Because the sysconfig scripts supply the bonding module
options in the ifcfg-bondX file, it is not necessary to add them to
-the system /etc/modules.conf or /etc/modprobe.conf configuration file.
+the system /etc/modprobe.d/*.conf configuration files.
3.2 Configuration with Initscripts Support
------------------------------------------
@@ -1098,15 +1097,13 @@ queried targets, e.g.,
arp_ip_target=+192.168.1.1 arp_ip_target=+192.168.1.2
is the proper syntax to specify multiple targets. When specifying
-options via BONDING_OPTS, it is not necessary to edit /etc/modules.conf or
-/etc/modprobe.conf.
+options via BONDING_OPTS, it is not necessary to edit /etc/modprobe.d/*.conf.
For even older versions of initscripts that do not support
-BONDING_OPTS, it is necessary to edit /etc/modules.conf (or
-/etc/modprobe.conf, depending upon your distro) to load the bonding module
-with your desired options when the bond0 interface is brought up. The
-following lines in /etc/modules.conf (or modprobe.conf) will load the
-bonding module, and select its options:
+BONDING_OPTS, it is necessary to edit /etc/modprobe.d/*.conf (the exact
+file depends upon your distro) to load the bonding module with your desired
+options when the bond0 interface is brought up. The following lines in
+/etc/modprobe.d/*.conf will load the bonding module, and select its options:
alias bond0 bonding
options bond0 mode=balance-alb miimon=100
@@ -1152,7 +1149,7 @@ knowledge of bonding. One such distro is SuSE Linux Enterprise Server
version 8.
The general method for these systems is to place the bonding
-module parameters into /etc/modules.conf or /etc/modprobe.conf (as
+module parameters into a config file in /etc/modprobe.d/ (as
appropriate for the installed distro), then add modprobe and/or
ifenslave commands to the system's global init script. The name of
the global init script differs; for sysconfig, it is
@@ -1228,7 +1225,7 @@ network initialization scripts.
specify a different name for each instance (the module loading system
requires that every loaded module, even multiple instances of the same
module, have a unique name). This is accomplished by supplying multiple
-sets of bonding options in /etc/modprobe.conf, for example:
+sets of bonding options in /etc/modprobe.d/*.conf, for example:
alias bond0 bonding
options bond0 -o bond0 mode=balance-rr miimon=100
@@ -1793,8 +1790,8 @@ route additions may cause trouble.
On systems with network configuration scripts that do not
associate physical devices directly with network interface names (so
that the same physical device always has the same "ethX" name), it may
-be necessary to add some special logic to either /etc/modules.conf or
-/etc/modprobe.conf (depending upon which is installed on the system).
+be necessary to add some special logic to config files in
+/etc/modprobe.d/.
For example, given a modules.conf containing the following:
@@ -1821,20 +1818,15 @@ add above bonding e1000 tg3
bonding is loaded. This command is fully documented in the
modules.conf manual page.
- On systems utilizing modprobe.conf (or modprobe.conf.local),
-an equivalent problem can occur. In this case, the following can be
-added to modprobe.conf (or modprobe.conf.local, as appropriate), as
-follows (all on one line; it has been split here for clarity):
+ On systems utilizing modprobe, an equivalent problem can occur.
+In this case, the following can be added to a config file in
+/etc/modprobe.d/:
-install bonding /sbin/modprobe tg3; /sbin/modprobe e1000;
- /sbin/modprobe --ignore-install bonding
+softdep bonding pre: tg3 e1000
- This will, when loading the bonding module, rather than
-performing the normal action, instead execute the provided command.
-This command loads the device drivers in the order needed, then calls
-modprobe with --ignore-install to cause the normal action to then take
-place. Full documentation on this can be found in the modprobe.conf
-and modprobe manual pages.
+ This will load tg3 and e1000 modules before loading the bonding one.
+Full documentation on this can be found in the modprobe.d and modprobe
+manual pages.
8.3. Painfully Slow Or No Failed Link Detection By Miimon
---------------------------------------------------------
diff --git a/Documentation/networking/dl2k.txt b/Documentation/networking/dl2k.txt
index 10e8490fa406..cba74f7a3abc 100644
--- a/Documentation/networking/dl2k.txt
+++ b/Documentation/networking/dl2k.txt
@@ -45,12 +45,13 @@ Now eth0 should active, you can test it by "ping" or get more information by
"ifconfig". If tested ok, continue the next step.
4. cp dl2k.ko /lib/modules/`uname -r`/kernel/drivers/net
-5. Add the following line to /etc/modprobe.conf:
+5. Add the following line to /etc/modprobe.d/dl2k.conf:
alias eth0 dl2k
-6. Run "netconfig" or "netconf" to create configuration script ifcfg-eth0
+6. Run depmod to update the module indexes.
+7. Run "netconfig" or "netconf" to create configuration script ifcfg-eth0
located at /etc/sysconfig/network-scripts or create it manually.
[see - Configuration Script Sample]
-7. Driver will automatically load and configure at next boot time.
+8. Driver will automatically load and configure at next boot time.
Compiling the Driver
====================
@@ -154,8 +155,8 @@ Installing the Driver
-----------------
1. Copy dl2k.o to the network modules directory, typically
/lib/modules/2.x.x-xx/net or /lib/modules/2.x.x/kernel/drivers/net.
- 2. Locate the boot module configuration file, most commonly modprobe.conf
- or modules.conf (for 2.4) in the /etc directory. Add the following lines:
+ 2. Locate the boot module configuration file, most commonly in the
+ /etc/modprobe.d/ directory. Add the following lines:
alias ethx dl2k
options dl2k <optional parameters>
diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt
index 162f323a7a1f..fcb6c71cdb69 100644
--- a/Documentation/networking/e100.txt
+++ b/Documentation/networking/e100.txt
@@ -94,8 +94,8 @@ Additional Configurations
Configuring a network driver to load properly when the system is started is
distribution dependent. Typically, the configuration process involves adding
- an alias line to /etc/modules.conf or /etc/modprobe.conf as well as editing
- other system startup scripts and/or configuration files. Many popular Linux
+ an alias line to /etc/modprobe.d/*.conf as well as editing other system
+ startup scripts and/or configuration files. Many popular Linux
distributions ship with tools to make these changes for you. To learn the
proper way to configure a network device for your system, refer to your
distribution documentation. If during this process you are asked for the
@@ -103,7 +103,7 @@ Additional Configurations
PRO/100 Family of Adapters is e100.
As an example, if you install the e100 driver for two PRO/100 adapters
- (eth0 and eth1), add the following to modules.conf or modprobe.conf:
+ (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/:
alias eth0 e100
alias eth1 e100
diff --git a/Documentation/networking/ipv6.txt b/Documentation/networking/ipv6.txt
index 9fd7e21296c8..6cd74fa55358 100644
--- a/Documentation/networking/ipv6.txt
+++ b/Documentation/networking/ipv6.txt
@@ -2,9 +2,9 @@
Options for the ipv6 module are supplied as parameters at load time.
Module options may be given as command line arguments to the insmod
-or modprobe command, but are usually specified in either the
-/etc/modules.conf or /etc/modprobe.conf configuration file, or in a
-distro-specific configuration file.
+or modprobe command, but are usually specified in either
+/etc/modprobe.d/*.conf configuration files, or in a distro-specific
+configuration file.
The available ipv6 module parameters are listed below. If a parameter
is not specified the default value is used.
diff --git a/Documentation/networking/ixgb.txt b/Documentation/networking/ixgb.txt
index e196f16df313..d75a1f9565bb 100644
--- a/Documentation/networking/ixgb.txt
+++ b/Documentation/networking/ixgb.txt
@@ -274,9 +274,9 @@ Additional Configurations
-------------------------------------------------
Configuring a network driver to load properly when the system is started is
distribution dependent. Typically, the configuration process involves adding
- an alias line to /etc/modprobe.conf as well as editing other system startup
- scripts and/or configuration files. Many popular Linux distributions ship
- with tools to make these changes for you. To learn the proper way to
+ an alias line to files in /etc/modprobe.d/ as well as editing other system
+ startup scripts and/or configuration files. Many popular Linux distributions
+ ship with tools to make these changes for you. To learn the proper way to
configure a network device for your system, refer to your distribution
documentation. If during this process you are asked for the driver or module
name, the name for the Linux Base Driver for the Intel 10GbE Family of
diff --git a/Documentation/networking/ltpc.txt b/Documentation/networking/ltpc.txt
index fe2a9129d959..0bf3220c715b 100644
--- a/Documentation/networking/ltpc.txt
+++ b/Documentation/networking/ltpc.txt
@@ -25,7 +25,7 @@ the driver will try to determine them itself.
If you load the driver as a module, you can pass the parameters "io=",
"irq=", and "dma=" on the command line with insmod or modprobe, or add
-them as options in /etc/modprobe.conf:
+them as options in a configuration file in the /etc/modprobe.d/ directory:
alias lt0 ltpc # autoload the module when the interface is configured
options ltpc io=0x240 irq=9 dma=1
diff --git a/Documentation/networking/vortex.txt b/Documentation/networking/vortex.txt
index bd70976b8160..b4038ffb3bc5 100644
--- a/Documentation/networking/vortex.txt
+++ b/Documentation/networking/vortex.txt
@@ -67,8 +67,8 @@ Module parameters
=================
There are several parameters which may be provided to the driver when
-its module is loaded. These are usually placed in /etc/modprobe.conf
-(/etc/modules.conf in 2.4). Example:
+its module is loaded. These are usually placed in /etc/modprobe.d/*.conf
+configuration files. Example:
options 3c59x debug=3 rx_copybreak=300
@@ -425,7 +425,7 @@ steps you should take:
1) Increase the debug level. Usually this is done via:
a) modprobe driver debug=7
- b) In /etc/modprobe.conf (or /etc/modules.conf for 2.4):
+ b) In /etc/modprobe.d/driver.conf:
options driver debug=7
2) Recreate the problem with the higher debug level,
diff --git a/Documentation/parport.txt b/Documentation/parport.txt
index 93a7ceef398d..c208e4366c03 100644
--- a/Documentation/parport.txt
+++ b/Documentation/parport.txt
@@ -36,18 +36,17 @@ addresses should not be specified for supported PCI cards since they
are automatically detected.
-KMod
-----
+modprobe
+--------
-If you use kmod, you will find it useful to edit /etc/modprobe.conf.
-Here is an example of the lines that need to be added:
+If you use modprobe, you will find it useful to add lines like the following
+to a configuration file in the /etc/modprobe.d/ directory:
alias parport_lowlevel parport_pc
options parport_pc io=0x378,0x278 irq=7,auto
-KMod will then automatically load parport_pc (with the options
-"io=0x378,0x278 irq=7,auto") whenever a parallel port device driver
-(such as lp) is loaded.
+modprobe will load parport_pc (with the options "io=0x378,0x278 irq=7,auto")
+whenever a parallel port device driver (such as lp) is loaded.
Note that these are example lines only! You shouldn't in general need
to specify any options to parport_pc in order to be able to use a
diff --git a/Documentation/s390/3270.txt b/Documentation/s390/3270.txt
index 7a5c73a7ed7f..7c715de99774 100644
--- a/Documentation/s390/3270.txt
+++ b/Documentation/s390/3270.txt
@@ -47,9 +47,9 @@ including the console 3270, changes subchannel identifier relative to
one another. ReIPL as soon as possible after running the configuration
script and the resulting /tmp/mkdev3270.
-If you have chosen to make tub3270 a module, you add a line to
-/etc/modprobe.conf. If you are working on a VM virtual machine, you
-can use DEF GRAF to define virtual 3270 devices.
+If you have chosen to make tub3270 a module, you add a line to a
+configuration file under /etc/modprobe.d/. If you are working on a VM
+virtual machine, you can use DEF GRAF to define virtual 3270 devices.
You may generate both 3270 and 3215 console support, or one or the
other, or neither. If you generate both, the console type under VM is
@@ -60,7 +60,7 @@ at boot time to a 3270 if it is a 3215.
In brief, these are the steps:
1. Install the tub3270 patch
- 2. (If a module) add a line to /etc/modprobe.conf
+ 2. (If a module) add a line to a file in /etc/modprobe.d/
3. (If VM) define devices with DEF GRAF
4. Reboot
5. Configure
@@ -84,13 +84,12 @@ Here are the installation steps in detail:
make modules_install
2. (Perform this step only if you have configured tub3270 as a
- module.) Add a line to /etc/modprobe.conf to automatically
- load the driver when it's needed. With this line added,
- you will see login prompts appear on your 3270s as soon as
- boot is complete (or with emulated 3270s, as soon as you dial
- into your vm guest using the command "DIAL <vmguestname>").
- Since the line-mode major number is 227, the line to add to
- /etc/modprobe.conf should be:
+ module.) Add a line to a file in /etc/modprobe.d/ to automatically
+ load the driver when it's needed. With this line added, you will see
+ login prompts appear on your 3270s as soon as boot is complete (or
+ with emulated 3270s, as soon as you dial into your vm guest using the
+ command "DIAL <vmguestname>"). Since the line-mode major number is
+ 227, the line to add should be:
alias char-major-227 tub3270
3. Define graphic devices to your vm guest machine, if you
diff --git a/Documentation/scsi/aic79xx.txt b/Documentation/scsi/aic79xx.txt
index 64ac7093c872..e2d3273000d4 100644
--- a/Documentation/scsi/aic79xx.txt
+++ b/Documentation/scsi/aic79xx.txt
@@ -215,7 +215,7 @@ The following information is available in this file:
INCORRECTLY CAN RENDER YOUR SYSTEM INOPERABLE.
USE THEM WITH CAUTION.
- Edit the file "modprobe.conf" in the directory /etc and add/edit a
+ Put a .conf file in the /etc/modprobe.d/ directory and add/edit a
line containing 'options aic79xx aic79xx=[command[,command...]]' where
'command' is one or more of the following:
-----------------------------------------------------------------
diff --git a/Documentation/scsi/aic7xxx.txt b/Documentation/scsi/aic7xxx.txt
index 18f8d1905e6a..7c5d0223d444 100644
--- a/Documentation/scsi/aic7xxx.txt
+++ b/Documentation/scsi/aic7xxx.txt
@@ -190,7 +190,7 @@ The following information is available in this file:
INCORRECTLY CAN RENDER YOUR SYSTEM INOPERABLE.
USE THEM WITH CAUTION.
- Edit the file "modprobe.conf" in the directory /etc and add/edit a
+ Put a .conf file in the /etc/modprobe.d/ directory and add/edit a
line containing 'options aic7xxx aic7xxx=[command[,command...]]' where
'command' is one or more of the following:
-----------------------------------------------------------------
diff --git a/Documentation/scsi/osst.txt b/Documentation/scsi/osst.txt
index ad86c6d1e898..00c8ebb2fd18 100644
--- a/Documentation/scsi/osst.txt
+++ b/Documentation/scsi/osst.txt
@@ -66,7 +66,7 @@ recognized.
If you want to have the module autoloaded on access to /dev/osst, you may
add something like
alias char-major-206 osst
-to your /etc/modprobe.conf (before 2.6: modules.conf).
+to a file under the /etc/modprobe.d/ directory.
You may find it convenient to create a symbolic link
ln -s nosst0 /dev/tape
diff --git a/Documentation/serial/computone.txt b/Documentation/serial/computone.txt
index 39ddcdbeeb85..a6a1158ea2ba 100644
--- a/Documentation/serial/computone.txt
+++ b/Documentation/serial/computone.txt
@@ -49,7 +49,7 @@ Hardware - If you have an ISA card, find a free interrupt and io port.
Note the hardware address from the Computone ISA cards installed into
the system. These are required for editing ip2.c or editing
- /etc/modprobe.conf, or for specification on the modprobe
+ /etc/modprobe.d/*.conf, or for specification on the modprobe
command line.
Note that the /etc/modules.conf should be used for older (pre-2.6)
@@ -66,7 +66,7 @@ b) Run "make config" or "make menuconfig" or "make xconfig"
c) Set address on ISA cards then:
edit /usr/src/linux/drivers/char/ip2.c if needed
or
- edit /etc/modprobe.conf if needed (module).
+ edit a config file in /etc/modprobe.d/ if needed (module).
or both to match this setting.
d) Run "make modules"
e) Run "make modules_install"
@@ -153,11 +153,11 @@ the irqs are not specified the driver uses the default in ip2.c (which
selects polled mode). If no base addresses are specified the defaults in
ip2.c are used. If you are autoloading the driver module with kerneld or
kmod the base addresses and interrupt number must also be set in ip2.c
-and recompile or just insert and options line in /etc/modprobe.conf or both.
+and recompile, or just insert an options line in /etc/modprobe.d/*.conf, or both.
The options line is equivalent to the command line and takes precedence over
what is in ip2.c.
-/etc/modprobe.conf sample:
+Sample configuration to put in a /etc/modprobe.d/*.conf file:
options ip2 io=1,0x328 irq=1,10
alias char-major-71 ip2
alias char-major-72 ip2
diff --git a/Documentation/serial/rocket.txt b/Documentation/serial/rocket.txt
index 1d8582990435..60b039891057 100644
--- a/Documentation/serial/rocket.txt
+++ b/Documentation/serial/rocket.txt
@@ -62,7 +62,7 @@ in the system log at /var/log/messages.
If installed as a module, the module must be loaded. This can be done
manually by entering "modprobe rocket". To have the module loaded automatically
-upon system boot, edit the /etc/modprobe.conf file and add the line
+upon system boot, edit a /etc/modprobe.d/*.conf file and add the line
"alias char-major-46 rocket".
In order to use the ports, their device names (nodes) must be created with mknod.
diff --git a/Documentation/serial/stallion.txt b/Documentation/serial/stallion.txt
index 5c4902d9a5be..55090914a9c5 100644
--- a/Documentation/serial/stallion.txt
+++ b/Documentation/serial/stallion.txt
@@ -139,8 +139,8 @@ secondary address 0x280 and IRQ 10.
You will probably want to enter this module load and configuration information
into your system startup scripts so that the drivers are loaded and configured
-on each system boot. Typically the start up script would be something like
-/etc/modprobe.conf.
+on each system boot. Typically configuration files are put in the
+/etc/modprobe.d/ directory.
2.2 STATIC DRIVER CONFIGURATION:
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 6f75ba3b8a39..8c16d50f6cb6 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -2044,7 +2044,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
Install the necessary firmware files in alsa-firmware package.
When no hotplug fw loader is available, you need to load the
firmware via vxloader utility in alsa-tools package. To invoke
- vxloader automatically, add the following to /etc/modprobe.conf
+ vxloader automatically, add the following to /etc/modprobe.d/alsa.conf
install snd-vx222 /sbin/modprobe --first-time -i snd-vx222 && /usr/bin/vxloader
@@ -2168,10 +2168,10 @@ corresponds to the card index of ALSA. Usually, define this
as the same card module.
An example configuration for a single emu10k1 card is like below:
------ /etc/modprobe.conf
+----- /etc/modprobe.d/alsa.conf
alias snd-card-0 snd-emu10k1
alias sound-slot-0 snd-emu10k1
------ /etc/modprobe.conf
+----- /etc/modprobe.d/alsa.conf
The available number of auto-loaded sound cards depends on the module
option "cards_limit" of snd module. As default it's set to 1.
@@ -2184,7 +2184,7 @@ cards is kept consistent.
An example configuration for two sound cards is like below:
------ /etc/modprobe.conf
+----- /etc/modprobe.d/alsa.conf
# ALSA portion
options snd cards_limit=2
alias snd-card-0 snd-interwave
@@ -2194,7 +2194,7 @@ options snd-ens1371 index=1
# OSS/Free portion
alias sound-slot-0 snd-interwave
alias sound-slot-1 snd-ens1371
------ /etc/modprobe.conf
+----- /etc/modprobe.d/alsa.conf
In this example, the interwave card is always loaded as the first card
(index 0) and ens1371 as the second (index 1).
diff --git a/Documentation/sound/alsa/Audiophile-Usb.txt b/Documentation/sound/alsa/Audiophile-Usb.txt
index a4c53d8961e1..654dd3b694a8 100644
--- a/Documentation/sound/alsa/Audiophile-Usb.txt
+++ b/Documentation/sound/alsa/Audiophile-Usb.txt
@@ -232,7 +232,7 @@ The parameter can be given:
# modprobe snd-usb-audio index=1 device_setup=0x09
* Or while configuring the modules options in your modules configuration file
- - For Fedora distributions, edit the /etc/modprobe.conf file:
+ (typically a .conf file in the /etc/modprobe.d/ directory):
alias snd-card-1 snd-usb-audio
options snd-usb-audio index=1 device_setup=0x09
@@ -253,7 +253,7 @@ CAUTION when initializing the device
- first turn off the device
- de-register the snd-usb-audio module (modprobe -r)
- change the device_setup parameter by changing the device_setup
- option in /etc/modprobe.conf
+ option in /etc/modprobe.d/*.conf
- turn on the device
* A workaround for this last issue has been applied to kernel 2.6.23, but it may not
be enough to ensure the 'stability' of the device initialization.
diff --git a/Documentation/sound/alsa/MIXART.txt b/Documentation/sound/alsa/MIXART.txt
index ef42c44fa1f2..4ee35b4fbe4a 100644
--- a/Documentation/sound/alsa/MIXART.txt
+++ b/Documentation/sound/alsa/MIXART.txt
@@ -76,9 +76,9 @@ FIRMWARE
when CONFIG_FW_LOADER is set. The mixartloader is necessary only
for older versions or when you build the driver into kernel.]
-For loading the firmware automatically after the module is loaded, use
-the post-install command. For example, add the following entry to
-/etc/modprobe.conf for miXart driver:
+For loading the firmware automatically after the module is loaded, use an
+install command. For example, add the following entry to
+/etc/modprobe.d/mixart.conf for miXart driver:
install snd-mixart /sbin/modprobe --first-time -i snd-mixart && \
/usr/bin/mixartloader
diff --git a/Documentation/sound/alsa/OSS-Emulation.txt b/Documentation/sound/alsa/OSS-Emulation.txt
index 022aaeb0e9dd..152ca2a3f1bd 100644
--- a/Documentation/sound/alsa/OSS-Emulation.txt
+++ b/Documentation/sound/alsa/OSS-Emulation.txt
@@ -19,7 +19,7 @@ the card number and the minor unit number. Usually you don't have to
define these aliases by yourself.
Only necessary step for auto-loading of OSS modules is to define the
-card alias in /etc/modprobe.conf, such as
+card alias in /etc/modprobe.d/alsa.conf, such as
alias sound-slot-0 snd-emu10k1
diff --git a/Documentation/sound/oss/AudioExcelDSP16 b/Documentation/sound/oss/AudioExcelDSP16
index e0dc0641b480..ea8549faede9 100644
--- a/Documentation/sound/oss/AudioExcelDSP16
+++ b/Documentation/sound/oss/AudioExcelDSP16
@@ -41,7 +41,7 @@ mpu_base I/O base address for activate MPU-401 mode
(0x300, 0x310, 0x320 or 0x330)
mpu_irq MPU-401 irq line (5, 7, 9, 10 or 0)
-The /etc/modprobe.conf will have lines like this:
+A configuration file in the /etc/modprobe.d/ directory will have lines like this:
options opl3 io=0x388
options ad1848 io=0x530 irq=11 dma=3
@@ -51,11 +51,11 @@ Where the aedsp16 options are the options for this driver while opl3 and
ad1848 are the corresponding options for the MSS and OPL3 modules.
Loading MSS and OPL3 needs to pre load the aedsp16 module to set up correctly
-the sound card. Installation dependencies must be written in the modprobe.conf
-file:
+the sound card. Installation dependencies must be written in configuration
+files under the /etc/modprobe.d/ directory:
-install ad1848 /sbin/modprobe aedsp16 && /sbin/modprobe -i ad1848
-install opl3 /sbin/modprobe aedsp16 && /sbin/modprobe -i opl3
+softdep ad1848 pre: aedsp16
+softdep opl3 pre: aedsp16
Then you must load the sound modules stack in this order:
sound -> aedsp16 -> [ ad1848, opl3 ]
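As a sketch of how this looks in practice (the file name aedsp16.conf is arbitrary; any *.conf name under /etc/modprobe.d/ works, and the option values are the ones quoted above), the soft dependencies and option lines can be collected in one file:

    # /etc/modprobe.d/aedsp16.conf
    softdep ad1848 pre: aedsp16
    softdep opl3 pre: aedsp16
    # the aedsp16 options line documented above goes here as well
    options ad1848 io=0x530 irq=11 dma=3
    options opl3 io=0x388

With this in place, 'modprobe ad1848' or 'modprobe opl3' pulls in aedsp16 first, matching the load order given above.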
diff --git a/Documentation/sound/oss/CMI8330 b/Documentation/sound/oss/CMI8330
index 9c439f1a6dba..8a5fd1611c6f 100644
--- a/Documentation/sound/oss/CMI8330
+++ b/Documentation/sound/oss/CMI8330
@@ -143,11 +143,10 @@ CONFIG_SOUND_MSS=m
-Alma Chao <elysian@ethereal.torsion.org> suggests the following /etc/modprobe.conf:
+Alma Chao <elysian@ethereal.torsion.org> suggests the following in
+a /etc/modprobe.d/*.conf file:
alias sound ad1848
alias synth0 opl3
options ad1848 io=0x530 irq=7 dma=0 soundpro=1
options opl3 io=0x388
-
-
diff --git a/Documentation/sound/oss/Introduction b/Documentation/sound/oss/Introduction
index 75d967ff9266..42da2d8fa372 100644
--- a/Documentation/sound/oss/Introduction
+++ b/Documentation/sound/oss/Introduction
@@ -167,8 +167,8 @@ in a file such as /root/soundon.sh.
MODPROBE:
=========
-If loading via modprobe, these common files are automatically loaded
-when requested by modprobe. For example, my /etc/modprobe.conf contains:
+If loading via modprobe, these common files are automatically loaded when
+requested by modprobe. For example, my /etc/modprobe.d/oss.conf contains:
alias sound sb
options sb io=0x240 irq=9 dma=3 dma16=5 mpu_io=0x300
@@ -228,7 +228,7 @@ http://www.opensound.com. Before loading the commercial sound
driver, you should do the following:
1. remove sound modules (detailed above)
-2. remove the sound modules from /etc/modprobe.conf
+2. remove the sound modules from /etc/modprobe.d/*.conf
3. move the sound modules from /lib/modules/<kernel>/misc
(for example, I make a /lib/modules/<kernel>/misc/tmp
directory and copy the sound module files to that
@@ -265,7 +265,7 @@ twice, you need to do the following:
sb.o could be copied (or symlinked) to sb1.o for the
second SoundBlaster.
-2. Make a second entry in /etc/modprobe.conf, for example,
+2. Make a second entry in /etc/modprobe.d/*.conf, for example,
sound1 or sb1. This second entry should refer to the
new module names for example sb1, and should include
the I/O, etc. for the second sound card.
@@ -369,7 +369,7 @@ There are several ways of configuring your sound:
2) On the command line when using insmod or in a bash script
using command line calls to load sound.
-3) In /etc/modprobe.conf when using modprobe.
+3) In /etc/modprobe.d/*.conf when using modprobe.
4) Via Red Hat's GPL'd /usr/sbin/sndconfig program (text based).
diff --git a/Documentation/sound/oss/Opti b/Documentation/sound/oss/Opti
index c15af3c07d46..4cd5d9ab3580 100644
--- a/Documentation/sound/oss/Opti
+++ b/Documentation/sound/oss/Opti
@@ -18,7 +18,7 @@ force the card into a mode in which it can be programmed.
If you have another OS installed on your computer it is recommended
that Linux and the other OS use the same resources.
-Also, it is recommended that resources specified in /etc/modprobe.conf
+Also, it is recommended that resources specified in /etc/modprobe.d/*.conf
and resources specified in /etc/isapnp.conf agree.
Compiling the sound driver
@@ -67,11 +67,7 @@ address is hard-coded into the driver.
Using kmod and autoloading the sound driver
-------------------------------------------
-Comment: as of linux-2.1.90 kmod is replacing kerneld.
-The config file '/etc/modprobe.conf' is used as before.
-
-This is the sound part of my /etc/modprobe.conf file.
-Following that I will explain each line.
+Configuration files in '/etc/modprobe.d/' are used as shown below:
alias mixer0 mad16
alias audio0 mad16
diff --git a/Documentation/sound/oss/PAS16 b/Documentation/sound/oss/PAS16
index 3dca4b75988e..5c27229eec8c 100644
--- a/Documentation/sound/oss/PAS16
+++ b/Documentation/sound/oss/PAS16
@@ -128,7 +128,7 @@ CONFIG_SOUND_YM3812
You can then get OPL3 functionality by issuing the command:
insmod opl3
In addition, you must either add the following line to
- /etc/modprobe.conf:
+ /etc/modprobe.d/*.conf:
options opl3 io=0x388
or else add the following line to /etc/lilo.conf:
opl3=0x388
@@ -158,5 +158,5 @@ following line would be appropriate:
append="pas2=0x388,10,3,-1,0,-1,-1,-1 opl3=0x388"
If sound is built totally modular, the above options may be
-specified in /etc/modprobe.conf for pas2, sb and opl3
+specified in /etc/modprobe.d/*.conf for pas2, sb and opl3
respectively.
diff --git a/Documentation/sound/oss/README.modules b/Documentation/sound/oss/README.modules
index e691d74e1e5e..cdc039421a46 100644
--- a/Documentation/sound/oss/README.modules
+++ b/Documentation/sound/oss/README.modules
@@ -26,7 +26,7 @@ Note that it is no longer necessary or possible to configure sound in the
drivers/sound dir. Now one simply configures and makes one's kernel and
modules in the usual way.
- Then, add to your /etc/modprobe.conf something like:
+ Then, add to your /etc/modprobe.d/oss.conf something like:
alias char-major-14-* sb
install sb /sbin/modprobe -i sb && /sbin/modprobe adlib_card
@@ -36,7 +36,7 @@ options adlib_card io=0x388 # FM synthesizer
Alternatively, if you have compiled in kernel level ISAPnP support:
alias char-major-14 sb
-post-install sb /sbin/modprobe "-k" "adlib_card"
+softdep sb post: adlib_card
options adlib_card io=0x388
The effect of this is that the sound driver and all necessary bits and
@@ -66,12 +66,12 @@ args are expected.
Note that at present there is no way to configure the io, irq and other
parameters for the modular drivers as one does for the wired drivers.. One
needs to pass the modules the necessary parameters as arguments, either
-with /etc/modprobe.conf or with command-line args to modprobe, e.g.
+with /etc/modprobe.d/*.conf or with command-line args to modprobe, e.g.
modprobe sb io=0x220 irq=7 dma=1 dma16=5 mpu_io=0x330
modprobe adlib_card io=0x388
- recommend using /etc/modprobe.conf.
+ recommend using /etc/modprobe.d/*.conf.
Persistent DMA Buffers:
@@ -89,7 +89,7 @@ wasteful of RAM, but it guarantees that sound always works.
To make the sound driver use persistent DMA buffers we need to pass the
sound.o module a "dmabuf=1" command-line argument. This is normally done
-in /etc/modprobe.conf like so:
+in /etc/modprobe.d/*.conf files like so:
options sound dmabuf=1
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 312e3754e8c5..642f84495b29 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -241,9 +241,8 @@ command you are interested in.
* I have more questions, who can I ask?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-And I'll answer any questions about the registration system you got, also
-responding as soon as possible.
- -Crutcher
+Just ask them on the linux-kernel mailing list:
+ linux-kernel@vger.kernel.org
* Credits
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 817df299ea07..4204eb01fd38 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -179,7 +179,8 @@ do:
modprobe usbcore autosuspend=5
-Equivalently, you could add to /etc/modprobe.conf a line saying:
+Equivalently, you could add to a configuration file in /etc/modprobe.d
+a line saying:
options usbcore autosuspend=5
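For example (a sketch; the file name usbcore.conf is arbitrary, and the sysfs path assumes the usual case where the autosuspend parameter is exported under /sys/module):

    # /etc/modprobe.d/usbcore.conf
    options usbcore autosuspend=5

The value actually in effect can then be read back at run time:

    cat /sys/module/usbcore/parameters/autosuspend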
diff --git a/Documentation/video4linux/CQcam.txt b/Documentation/video4linux/CQcam.txt
index 8977e7ce4dab..6e680fec1e9c 100644
--- a/Documentation/video4linux/CQcam.txt
+++ b/Documentation/video4linux/CQcam.txt
@@ -61,29 +61,19 @@ But that is my personal preference.
2.2 Configuration
The configuration requires module configuration and device
-configuration. I like kmod or kerneld process with the
-/etc/modprobe.conf file so the modules can automatically load/unload as
-they are used. The video devices could already exist, be generated
-using MAKEDEV, or need to be created. The following sections detail
-these procedures.
+configuration. The following sections detail these procedures.
2.1 Module Configuration
Using modules requires a bit of work to install and pass the
-parameters. Understand that entries in /etc/modprobe.conf of:
+parameters. Typical entries in a /etc/modprobe.d/*.conf file look like:
alias parport_lowlevel parport_pc
options parport_pc io=0x378 irq=none
alias char-major-81 videodev
alias char-major-81-0 c-qcam
-will cause the kmod/modprobe to do certain things. If you are
-using kmod, then a request for a 'char-major-81-0' will cause
-the 'c-qcam' module to load. If you have other video sources with
-modules, you might want to assign the different minor numbers to
-different modules.
-
2.2 Device Configuration
At this point, we need to ensure that the device files exist.
diff --git a/Documentation/video4linux/Zoran b/Documentation/video4linux/Zoran
index 9ed629d4874b..b5a911fd0602 100644
--- a/Documentation/video4linux/Zoran
+++ b/Documentation/video4linux/Zoran
@@ -255,7 +255,7 @@ Load zr36067.o. If it can't autodetect your card, use the card=X insmod
option with X being the card number as given in the previous section.
To have more than one card, use card=X1[,X2[,X3,[X4[..]]]]
-To automate this, add the following to your /etc/modprobe.conf:
+To automate this, add the following to your /etc/modprobe.d/zoran.conf:
options zr36067 card=X1[,X2[,X3[,X4[..]]]]
alias char-major-81-0 zr36067
diff --git a/Documentation/video4linux/bttv/Modules.conf b/Documentation/video4linux/bttv/Modules.conf
index 753f15956eb8..8f258faf18f1 100644
--- a/Documentation/video4linux/bttv/Modules.conf
+++ b/Documentation/video4linux/bttv/Modules.conf
@@ -1,4 +1,4 @@
-# For modern kernels (2.6 or above), this belongs in /etc/modprobe.conf
+# For modern kernels (2.6 or above), this belongs in /etc/modprobe.d/*.conf
# For for 2.4 kernels or earlier, this belongs in /etc/modules.conf.
# i2c
diff --git a/Documentation/video4linux/meye.txt b/Documentation/video4linux/meye.txt
index 34e2842c70ae..a051152ea99c 100644
--- a/Documentation/video4linux/meye.txt
+++ b/Documentation/video4linux/meye.txt
@@ -55,7 +55,7 @@ Module use:
-----------
In order to automatically load the meye module on use, you can put those lines
-in your /etc/modprobe.conf file:
+in your /etc/modprobe.d/meye.conf file:
alias char-major-81 videodev
alias char-major-81-0 meye
diff --git a/MAINTAINERS b/MAINTAINERS
index f9faadef7ab7..eecf3441ac21 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1940,7 +1940,7 @@ F: drivers/connector/
CONTROL GROUPS (CGROUPS)
M: Tejun Heo <tj@kernel.org>
-M: Li Zefan <lizf@cn.fujitsu.com>
+M: Li Zefan <lizefan@huawei.com>
L: containers@lists.linux-foundation.org
L: cgroups@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
@@ -5185,7 +5185,7 @@ F: kernel/delayacct.c
PERFORMANCE EVENTS SUBSYSTEM
M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Paul Mackerras <paulus@samba.org>
-M: Ingo Molnar <mingo@elte.hu>
+M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Supported
@@ -5833,7 +5833,7 @@ S: Maintained
F: drivers/watchdog/sc1200wdt.c
SCHEDULER
-M: Ingo Molnar <mingo@elte.hu>
+M: Ingo Molnar <mingo@redhat.com>
M: Peter Zijlstra <peterz@infradead.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
S: Maintained
diff --git a/arch/Kconfig b/arch/Kconfig
index a6f14f622d13..684eb5af439d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -213,4 +213,7 @@ config HAVE_CMPXCHG_LOCAL
config HAVE_CMPXCHG_DOUBLE
bool
+config ARCH_WANT_OLD_COMPAT_IPC
+ bool
+
source "kernel/gcov/Kconfig"
diff --git a/arch/alpha/include/asm/posix_types.h b/arch/alpha/include/asm/posix_types.h
index db167413300b..24779fc95994 100644
--- a/arch/alpha/include/asm/posix_types.h
+++ b/arch/alpha/include/asm/posix_types.h
@@ -8,116 +8,13 @@
*/
typedef unsigned int __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
-typedef unsigned int __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef long long __kernel_loff_t;
-typedef int __kernel_pid_t;
-typedef int __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_timer_t;
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-typedef __kernel_uid_t __kernel_old_uid_t;
-typedef __kernel_gid_t __kernel_old_gid_t;
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
-
-typedef unsigned int __kernel_old_dev_t;
-
-#ifdef __KERNEL__
-
-#ifndef __GNUC__
-
-#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
-#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
-#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
-#define __FD_ZERO(set) \
- ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set)))
-
-#else /* __GNUC__ */
-
-/* With GNU C, use inline functions instead so args are evaluated only once: */
+#define __kernel_ino_t __kernel_ino_t
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
-{
- unsigned long *tmp = p->fds_bits;
- int i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 16:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- return;
-
- case 8:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- return;
-
- case 4:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- return;
- }
- }
- i = __FDSET_LONGS;
- while (i) {
- i--;
- *tmp = 0;
- tmp++;
- }
-}
+typedef unsigned int __kernel_nlink_t;
+#define __kernel_nlink_t __kernel_nlink_t
-#endif /* __GNUC__ */
+typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
-#endif /* __KERNEL__ */
+#include <asm-generic/posix_types.h>
#endif /* _ALPHA_POSIX_TYPES_H */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5098564d5879..93180845ae16 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -9,6 +9,7 @@ config ARM
select SYS_SUPPORTS_APM_EMULATION
select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KRETPROBES if (HAVE_KPROBES)
@@ -21,6 +22,7 @@ config ARM
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
+ select HAVE_KERNEL_XZ
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
@@ -28,10 +30,10 @@ config ARM
select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
select HAVE_C_RECORDMCOUNT
select HAVE_GENERIC_HARDIRQS
- select HAVE_SPARSE_IRQ
select GENERIC_IRQ_SHOW
select CPU_PM if (SUSPEND || CPU_IDLE)
select GENERIC_PCI_IOMAP
+ select HAVE_BPF_JIT if NET
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
@@ -52,9 +54,6 @@ config MIGHT_HAVE_PCI
config SYS_SUPPORTS_APM_EMULATION
bool
-config HAVE_SCHED_CLOCK
- bool
-
config GENERIC_GPIO
bool
@@ -180,6 +179,9 @@ config ZONE_DMA
config NEED_DMA_MAP_STATE
def_bool y
+config ARCH_HAS_DMA_SET_COHERENT_MASK
+ bool
+
config GENERIC_ISA_DMA
bool
@@ -217,6 +219,13 @@ config ARM_PATCH_PHYS_VIRT
this feature (eg, building a kernel for a single machine) and
you need to shrink the kernel to the minimal size.
+config NEED_MACH_IO_H
+ bool
+ help
+ Select this when mach/io.h is required to provide special
+ definitions for this platform. The need for mach/io.h should
+ be avoided when possible.
+
config NEED_MACH_MEMORY_H
bool
help
@@ -268,7 +277,9 @@ config ARCH_INTEGRATOR
select GENERIC_CLOCKEVENTS
select PLAT_VERSATILE
select PLAT_VERSATILE_FPGA_IRQ
+ select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
+ select SPARSE_IRQ
help
Support for ARM's Integrator platform.
@@ -315,6 +326,7 @@ config ARCH_VEXPRESS
select HAVE_CLK
select HAVE_PATA_PLATFORM
select ICST
+ select NO_IOPORT
select PLAT_VERSATILE
select PLAT_VERSATILE_CLCD
help
@@ -354,6 +366,7 @@ config ARCH_HIGHBANK
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU
select HAVE_SMP
+ select SPARSE_IRQ
select USE_OF
help
Support for the Calxeda Highbank SoC based boards.
@@ -404,6 +417,7 @@ config ARCH_EBSA110
select ISA
select NO_IOPORT
select ARCH_USES_GETTIMEOFFSET
+ select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
help
This is an evaluation board for the StrongARM processor available
@@ -430,6 +444,7 @@ config ARCH_FOOTBRIDGE
select FOOTBRIDGE
select GENERIC_CLOCKEVENTS
select HAVE_IDE
+ select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
help
Support for systems based on the DC21285 companion chip
@@ -442,7 +457,6 @@ config ARCH_MXC
select CLKDEV_LOOKUP
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
- select HAVE_SCHED_CLOCK
select MULTI_IRQ_HANDLER
help
Support for Freescale MXC/iMX-based family of processors
@@ -482,6 +496,7 @@ config ARCH_IOP13XX
select PCI
select ARCH_SUPPORTS_MSI
select VMSPLIT_1G
+ select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
select NEED_RET_TO_USER
help
@@ -491,6 +506,7 @@ config ARCH_IOP32X
bool "IOP32x-based"
depends on MMU
select CPU_XSCALE
+ select NEED_MACH_IO_H
select NEED_RET_TO_USER
select PLAT_IOP
select PCI
@@ -503,6 +519,7 @@ config ARCH_IOP33X
bool "IOP33x-based"
depends on MMU
select CPU_XSCALE
+ select NEED_MACH_IO_H
select NEED_RET_TO_USER
select PLAT_IOP
select PCI
@@ -516,6 +533,7 @@ config ARCH_IXP23XX
select CPU_XSC3
select PCI
select ARCH_USES_GETTIMEOFFSET
+ select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
help
Support for Intel's IXP23xx (XScale) family of processors.
@@ -526,6 +544,7 @@ config ARCH_IXP2000
select CPU_XSCALE
select PCI
select ARCH_USES_GETTIMEOFFSET
+ select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
help
Support for Intel's IXP2400/2800 (XScale) family of processors.
@@ -533,12 +552,13 @@ config ARCH_IXP2000
config ARCH_IXP4XX
bool "IXP4xx-based"
depends on MMU
+ select ARCH_HAS_DMA_SET_COHERENT_MASK
select CLKSRC_MMIO
select CPU_XSCALE
select GENERIC_GPIO
select GENERIC_CLOCKEVENTS
- select HAVE_SCHED_CLOCK
select MIGHT_HAVE_PCI
+ select NEED_MACH_IO_H
select DMABOUNCE if PCI
help
Support for Intel's IXP4XX (XScale) family of processors.
@@ -549,6 +569,7 @@ config ARCH_DOVE
select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
+ select NEED_MACH_IO_H
select PLAT_ORION
help
Support for the Marvell Dove SoC 88AP510
@@ -559,6 +580,7 @@ config ARCH_KIRKWOOD
select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
+ select NEED_MACH_IO_H
select PLAT_ORION
help
Support for the following Marvell Kirkwood series SoCs:
@@ -583,6 +605,7 @@ config ARCH_MV78XX0
select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
+ select NEED_MACH_IO_H
select PLAT_ORION
help
Support for the following Marvell MV78xx0 series SoCs:
@@ -608,7 +631,6 @@ config ARCH_MMP
select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
select GPIO_PXA
- select HAVE_SCHED_CLOCK
select TICK_ONESHOT
select PLAT_PXA
select SPARSE_IRQ
@@ -649,9 +671,9 @@ config ARCH_TEGRA
select GENERIC_CLOCKEVENTS
select GENERIC_GPIO
select HAVE_CLK
- select HAVE_SCHED_CLOCK
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
+ select NEED_MACH_IO_H if PCI
select ARCH_HAS_CPUFREQ
help
This enables support for NVIDIA Tegra based systems (Tegra APX,
@@ -666,7 +688,6 @@ config ARCH_PICOXCELL
select DW_APB_TIMER
select GENERIC_CLOCKEVENTS
select GENERIC_GPIO
- select HAVE_SCHED_CLOCK
select HAVE_TCM
select NO_IOPORT
select SPARSE_IRQ
@@ -694,7 +715,6 @@ config ARCH_PXA
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
select GPIO_PXA
- select HAVE_SCHED_CLOCK
select TICK_ONESHOT
select PLAT_PXA
select SPARSE_IRQ
@@ -745,6 +765,7 @@ config ARCH_RPC
select ARCH_SPARSEMEM_ENABLE
select ARCH_USES_GETTIMEOFFSET
select HAVE_IDE
+ select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
help
On the Acorn Risc-PC, Linux can support the internal IDE disk and
@@ -761,7 +782,6 @@ config ARCH_SA1100
select CPU_FREQ
select GENERIC_CLOCKEVENTS
select CLKDEV_LOOKUP
- select HAVE_SCHED_CLOCK
select TICK_ONESHOT
select ARCH_REQUIRE_GPIOLIB
select HAVE_IDE
@@ -780,6 +800,7 @@ config ARCH_S3C24XX
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C_RTC if RTC_CLASS
select HAVE_S3C2410_WATCHDOG if WATCHDOG
+ select NEED_MACH_IO_H
help
Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443
and S3C2450 SoCs based systems, such as the Simtec Electronics BAST
@@ -818,7 +839,6 @@ config ARCH_S5P64X0
select CLKSRC_MMIO
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select GENERIC_CLOCKEVENTS
- select HAVE_SCHED_CLOCK
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C_RTC if RTC_CLASS
help
@@ -849,7 +869,6 @@ config ARCH_S5PV210
select CLKSRC_MMIO
select ARCH_HAS_CPUFREQ
select GENERIC_CLOCKEVENTS
- select HAVE_SCHED_CLOCK
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C_RTC if RTC_CLASS
select HAVE_S3C2410_WATCHDOG if WATCHDOG
@@ -883,6 +902,7 @@ config ARCH_SHARK
select PCI
select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
+ select NEED_MACH_IO_H
help
Support for the StrongARM based Digital DNARD machine, also known
as "Shark" (<http://www.shark-linux.de/shark.html>).
@@ -892,7 +912,6 @@ config ARCH_U300
depends on MMU
select CLKSRC_MMIO
select CPU_ARM926T
- select HAVE_SCHED_CLOCK
select HAVE_TCM
select ARM_AMBA
select ARM_PATCH_PHYS_VIRT
@@ -951,7 +970,6 @@ config ARCH_OMAP
select ARCH_HAS_CPUFREQ
select CLKSRC_MMIO
select GENERIC_CLOCKEVENTS
- select HAVE_SCHED_CLOCK
select ARCH_HAS_HOLES_MEMORYMODEL
help
Support for TI's OMAP platform (OMAP1/2/3/4).
@@ -1115,13 +1133,11 @@ config ARCH_ACORN
config PLAT_IOP
bool
select GENERIC_CLOCKEVENTS
- select HAVE_SCHED_CLOCK
config PLAT_ORION
bool
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
- select HAVE_SCHED_CLOCK
config PLAT_PXA
bool
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 66ca8014ff3e..85348a09d655 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -292,6 +292,22 @@ choice
Note that the system will appear to hang during boot if there
is nothing connected to read from the DCC.
+ config DEBUG_SEMIHOSTING
+ bool "Kernel low-level debug output via semihosting I"
+ help
+ Semihosting enables code running on an ARM target to use
+ the I/O facilities on a host debugger/emulator through
+ simple SVC calls. The host debugger or emulator must have
+ semihosting enabled for the special SVC call to be trapped;
+ otherwise the kernel will crash.
+
+ This is known to work with OpenOCD, as well as
+ ARM's Fast Models and any other controlling environment
+ that implements semihosting.
+
+ For more details about semihosting, please see
+ chapter 8 of DUI0203I_rvct_developer_guide.pdf from ARM Ltd.
+
endchoice
config EARLY_PRINTK
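The DEBUG_SEMIHOSTING output above only appears if semihosting is switched on from the controlling debugger before the kernel boots. With OpenOCD, for instance, that is typically done with (shown as an illustration; check your debugger's documentation):

    arm semihosting enable

Otherwise the SVC issued by the kernel is not trapped and the target faults, as the help text warns.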
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index dcb088e868fe..047a20780fc1 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -253,6 +253,7 @@ core-$(CONFIG_VFP) += arch/arm/vfp/
# If we have a machine-specific directory, then include it in the build.
core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
+core-y += arch/arm/net/
core-y += $(machdirs) $(platdirs)
drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index e0936a148516..d0d441c429ae 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,8 +1,10 @@
+ashldi3.S
font.c
lib1funcs.S
piggy.gzip
piggy.lzo
piggy.lzma
+piggy.xzkern
vmlinux
vmlinux.lds
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index cf0a64ce4b83..bb267562e7ed 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -92,6 +92,7 @@ SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
suffix_$(CONFIG_KERNEL_GZIP) = gzip
suffix_$(CONFIG_KERNEL_LZO) = lzo
suffix_$(CONFIG_KERNEL_LZMA) = lzma
+suffix_$(CONFIG_KERNEL_XZ) = xzkern
# Borrowed libfdt files for the ATAG compatibility mode
@@ -112,10 +113,12 @@ endif
targets := vmlinux vmlinux.lds \
piggy.$(suffix_y) piggy.$(suffix_y).o \
- lib1funcs.o lib1funcs.S font.o font.c head.o misc.o $(OBJS)
+ lib1funcs.o lib1funcs.S ashldi3.o ashldi3.S \
+ font.o font.c head.o misc.o $(OBJS)
# Make sure files are removed during clean
-extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S $(libfdt) $(libfdt_hdrs)
+extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \
+ lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
@@ -151,6 +154,12 @@ lib1funcs = $(obj)/lib1funcs.o
$(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S
$(call cmd,shipped)
+# For __aeabi_llsl
+ashldi3 = $(obj)/ashldi3.o
+
+$(obj)/ashldi3.S: $(srctree)/arch/$(SRCARCH)/lib/ashldi3.S
+ $(call cmd,shipped)
+
# We need to prevent any GOTOFF relocs being used with references
# to symbols in the .bss section since we cannot relocate them
# independently from the rest at run time. This can be achieved by
@@ -172,7 +181,7 @@ if [ $(words $(ZRELADDR)) -gt 1 -a "$(CONFIG_AUTO_ZRELADDR)" = "" ]; then \
fi
$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
- $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE
+ $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) FORCE
@$(check_for_multiple_zreladdr)
$(call if_changed,ld)
@$(check_for_bad_syms)
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index 07be5a2f8302..f41b38cafce8 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -44,6 +44,12 @@ extern void error(char *);
#include "../../../../lib/decompress_unlzma.c"
#endif
+#ifdef CONFIG_KERNEL_XZ
+#define memmove memmove
+#define memcpy memcpy
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
{
return decompress(input, len, NULL, NULL, output, NULL, error);
diff --git a/arch/arm/boot/compressed/piggy.xzkern.S b/arch/arm/boot/compressed/piggy.xzkern.S
new file mode 100644
index 000000000000..5703f300d027
--- /dev/null
+++ b/arch/arm/boot/compressed/piggy.xzkern.S
@@ -0,0 +1,6 @@
+ .section .piggydata,#alloc
+ .globl input_data
+input_data:
+ .incbin "arch/arm/boot/compressed/piggy.xzkern"
+ .globl input_data_end
+input_data_end:
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 3bb1d7589bd9..283fa1d804f4 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -24,9 +24,6 @@ config ARM_VIC_NR
config ICST
bool
-config PL330
- bool
-
config SA1111
bool
select DMABOUNCE if !ARCH_PXA
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 69feafe7286c..215816f1775f 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_ARM_GIC) += gic.o
obj-$(CONFIG_ARM_VIC) += vic.o
obj-$(CONFIG_ICST) += icst.o
-obj-$(CONFIG_PL330) += pl330.o
obj-$(CONFIG_SA1111) += sa1111.o
obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index f0783be17352..aa5269984187 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -686,13 +686,12 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
* For primary GICs, skip over SGIs.
* For secondary GICs, skip over PPIs, too.
*/
- hwirq_base = 32;
- if (gic_nr == 0) {
- if ((irq_start & 31) > 0) {
- hwirq_base = 16;
- if (irq_start != -1)
- irq_start = (irq_start & ~31) + 16;
- }
+ if (gic_nr == 0 && (irq_start & 31) > 0) {
+ hwirq_base = 16;
+ if (irq_start != -1)
+ irq_start = (irq_start & ~31) + 16;
+ } else {
+ hwirq_base = 32;
}
/*
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
deleted file mode 100644
index ff3ad2244824..000000000000
--- a/arch/arm/common/pl330.c
+++ /dev/null
@@ -1,1960 +0,0 @@
-/* linux/arch/arm/common/pl330.c
- *
- * Copyright (C) 2010 Samsung Electronics Co Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/hardware/pl330.h>
-
-/* Register and Bit field Definitions */
-#define DS 0x0
-#define DS_ST_STOP 0x0
-#define DS_ST_EXEC 0x1
-#define DS_ST_CMISS 0x2
-#define DS_ST_UPDTPC 0x3
-#define DS_ST_WFE 0x4
-#define DS_ST_ATBRR 0x5
-#define DS_ST_QBUSY 0x6
-#define DS_ST_WFP 0x7
-#define DS_ST_KILL 0x8
-#define DS_ST_CMPLT 0x9
-#define DS_ST_FLTCMP 0xe
-#define DS_ST_FAULT 0xf
-
-#define DPC 0x4
-#define INTEN 0x20
-#define ES 0x24
-#define INTSTATUS 0x28
-#define INTCLR 0x2c
-#define FSM 0x30
-#define FSC 0x34
-#define FTM 0x38
-
-#define _FTC 0x40
-#define FTC(n) (_FTC + (n)*0x4)
-
-#define _CS 0x100
-#define CS(n) (_CS + (n)*0x8)
-#define CS_CNS (1 << 21)
-
-#define _CPC 0x104
-#define CPC(n) (_CPC + (n)*0x8)
-
-#define _SA 0x400
-#define SA(n) (_SA + (n)*0x20)
-
-#define _DA 0x404
-#define DA(n) (_DA + (n)*0x20)
-
-#define _CC 0x408
-#define CC(n) (_CC + (n)*0x20)
-
-#define CC_SRCINC (1 << 0)
-#define CC_DSTINC (1 << 14)
-#define CC_SRCPRI (1 << 8)
-#define CC_DSTPRI (1 << 22)
-#define CC_SRCNS (1 << 9)
-#define CC_DSTNS (1 << 23)
-#define CC_SRCIA (1 << 10)
-#define CC_DSTIA (1 << 24)
-#define CC_SRCBRSTLEN_SHFT 4
-#define CC_DSTBRSTLEN_SHFT 18
-#define CC_SRCBRSTSIZE_SHFT 1
-#define CC_DSTBRSTSIZE_SHFT 15
-#define CC_SRCCCTRL_SHFT 11
-#define CC_SRCCCTRL_MASK 0x7
-#define CC_DSTCCTRL_SHFT 25
-#define CC_DRCCCTRL_MASK 0x7
-#define CC_SWAP_SHFT 28
-
-#define _LC0 0x40c
-#define LC0(n) (_LC0 + (n)*0x20)
-
-#define _LC1 0x410
-#define LC1(n) (_LC1 + (n)*0x20)
-
-#define DBGSTATUS 0xd00
-#define DBG_BUSY (1 << 0)
-
-#define DBGCMD 0xd04
-#define DBGINST0 0xd08
-#define DBGINST1 0xd0c
-
-#define CR0 0xe00
-#define CR1 0xe04
-#define CR2 0xe08
-#define CR3 0xe0c
-#define CR4 0xe10
-#define CRD 0xe14
-
-#define PERIPH_ID 0xfe0
-#define PCELL_ID 0xff0
-
-#define CR0_PERIPH_REQ_SET (1 << 0)
-#define CR0_BOOT_EN_SET (1 << 1)
-#define CR0_BOOT_MAN_NS (1 << 2)
-#define CR0_NUM_CHANS_SHIFT 4
-#define CR0_NUM_CHANS_MASK 0x7
-#define CR0_NUM_PERIPH_SHIFT 12
-#define CR0_NUM_PERIPH_MASK 0x1f
-#define CR0_NUM_EVENTS_SHIFT 17
-#define CR0_NUM_EVENTS_MASK 0x1f
-
-#define CR1_ICACHE_LEN_SHIFT 0
-#define CR1_ICACHE_LEN_MASK 0x7
-#define CR1_NUM_ICACHELINES_SHIFT 4
-#define CR1_NUM_ICACHELINES_MASK 0xf
-
-#define CRD_DATA_WIDTH_SHIFT 0
-#define CRD_DATA_WIDTH_MASK 0x7
-#define CRD_WR_CAP_SHIFT 4
-#define CRD_WR_CAP_MASK 0x7
-#define CRD_WR_Q_DEP_SHIFT 8
-#define CRD_WR_Q_DEP_MASK 0xf
-#define CRD_RD_CAP_SHIFT 12
-#define CRD_RD_CAP_MASK 0x7
-#define CRD_RD_Q_DEP_SHIFT 16
-#define CRD_RD_Q_DEP_MASK 0xf
-#define CRD_DATA_BUFF_SHIFT 20
-#define CRD_DATA_BUFF_MASK 0x3ff
-
-#define PART 0x330
-#define DESIGNER 0x41
-#define REVISION 0x0
-#define INTEG_CFG 0x0
-#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
-
-#define PCELL_ID_VAL 0xb105f00d
-
-#define PL330_STATE_STOPPED (1 << 0)
-#define PL330_STATE_EXECUTING (1 << 1)
-#define PL330_STATE_WFE (1 << 2)
-#define PL330_STATE_FAULTING (1 << 3)
-#define PL330_STATE_COMPLETING (1 << 4)
-#define PL330_STATE_WFP (1 << 5)
-#define PL330_STATE_KILLING (1 << 6)
-#define PL330_STATE_FAULT_COMPLETING (1 << 7)
-#define PL330_STATE_CACHEMISS (1 << 8)
-#define PL330_STATE_UPDTPC (1 << 9)
-#define PL330_STATE_ATBARRIER (1 << 10)
-#define PL330_STATE_QUEUEBUSY (1 << 11)
-#define PL330_STATE_INVALID (1 << 15)
-
-#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
- | PL330_STATE_WFE | PL330_STATE_FAULTING)
-
-#define CMD_DMAADDH 0x54
-#define CMD_DMAEND 0x00
-#define CMD_DMAFLUSHP 0x35
-#define CMD_DMAGO 0xa0
-#define CMD_DMALD 0x04
-#define CMD_DMALDP 0x25
-#define CMD_DMALP 0x20
-#define CMD_DMALPEND 0x28
-#define CMD_DMAKILL 0x01
-#define CMD_DMAMOV 0xbc
-#define CMD_DMANOP 0x18
-#define CMD_DMARMB 0x12
-#define CMD_DMASEV 0x34
-#define CMD_DMAST 0x08
-#define CMD_DMASTP 0x29
-#define CMD_DMASTZ 0x0c
-#define CMD_DMAWFE 0x36
-#define CMD_DMAWFP 0x30
-#define CMD_DMAWMB 0x13
-
-#define SZ_DMAADDH 3
-#define SZ_DMAEND 1
-#define SZ_DMAFLUSHP 2
-#define SZ_DMALD 1
-#define SZ_DMALDP 2
-#define SZ_DMALP 2
-#define SZ_DMALPEND 2
-#define SZ_DMAKILL 1
-#define SZ_DMAMOV 6
-#define SZ_DMANOP 1
-#define SZ_DMARMB 1
-#define SZ_DMASEV 2
-#define SZ_DMAST 1
-#define SZ_DMASTP 2
-#define SZ_DMASTZ 1
-#define SZ_DMAWFE 2
-#define SZ_DMAWFP 2
-#define SZ_DMAWMB 1
-#define SZ_DMAGO 6
-
-#define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
-#define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))
-
-#define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
-#define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
-
-/*
- * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
- * at 1byte/burst for P<->M and M<->M respectively.
- * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
- * should be enough for P<->M and M<->M respectively.
- */
-#define MCODE_BUFF_PER_REQ 256
-
-/* If the _pl330_req is available to the client */
-#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
-
-/* Use this _only_ to wait on transient states */
-#define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax();
-
-#ifdef PL330_DEBUG_MCGEN
-static unsigned cmd_line;
-#define PL330_DBGCMD_DUMP(off, x...) do { \
- printk("%x:", cmd_line); \
- printk(x); \
- cmd_line += off; \
- } while (0)
-#define PL330_DBGMC_START(addr) (cmd_line = addr)
-#else
-#define PL330_DBGCMD_DUMP(off, x...) do {} while (0)
-#define PL330_DBGMC_START(addr) do {} while (0)
-#endif
-
-struct _xfer_spec {
- u32 ccr;
- struct pl330_req *r;
- struct pl330_xfer *x;
-};
-
-enum dmamov_dst {
- SAR = 0,
- CCR,
- DAR,
-};
-
-enum pl330_dst {
- SRC = 0,
- DST,
-};
-
-enum pl330_cond {
- SINGLE,
- BURST,
- ALWAYS,
-};
-
-struct _pl330_req {
- u32 mc_bus;
- void *mc_cpu;
- /* Number of bytes taken to setup MC for the req */
- u32 mc_len;
- struct pl330_req *r;
- /* Hook to attach to DMAC's list of reqs with due callback */
- struct list_head rqd;
-};
-
-/* ToBeDone for tasklet */
-struct _pl330_tbd {
- bool reset_dmac;
- bool reset_mngr;
- u8 reset_chan;
-};
-
-/* A DMAC Thread */
-struct pl330_thread {
- u8 id;
- int ev;
- /* If the channel is not yet acquired by any client */
- bool free;
- /* Parent DMAC */
- struct pl330_dmac *dmac;
- /* Only two at a time */
- struct _pl330_req req[2];
- /* Index of the last enqueued request */
- unsigned lstenq;
- /* Index of the last submitted request or -1 if the DMA is stopped */
- int req_running;
-};
-
-enum pl330_dmac_state {
- UNINIT,
- INIT,
- DYING,
-};
-
-/* A DMAC */
-struct pl330_dmac {
- spinlock_t lock;
- /* Holds list of reqs with due callbacks */
- struct list_head req_done;
- /* Pointer to platform specific stuff */
- struct pl330_info *pinfo;
- /* Maximum possible events/irqs */
- int events[32];
- /* BUS address of MicroCode buffer */
- u32 mcode_bus;
- /* CPU address of MicroCode buffer */
- void *mcode_cpu;
- /* List of all Channel threads */
- struct pl330_thread *channels;
- /* Pointer to the MANAGER thread */
- struct pl330_thread *manager;
- /* To handle bad news in interrupt */
- struct tasklet_struct tasks;
- struct _pl330_tbd dmac_tbd;
- /* State of DMAC operation */
- enum pl330_dmac_state state;
-};
-
-static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
-{
- if (r && r->xfer_cb)
- r->xfer_cb(r->token, err);
-}
-
-static inline bool _queue_empty(struct pl330_thread *thrd)
-{
- return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
- ? true : false;
-}
-
-static inline bool _queue_full(struct pl330_thread *thrd)
-{
- return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
- ? false : true;
-}
-
-static inline bool is_manager(struct pl330_thread *thrd)
-{
- struct pl330_dmac *pl330 = thrd->dmac;
-
- /* MANAGER is indexed at the end */
- if (thrd->id == pl330->pinfo->pcfg.num_chan)
- return true;
- else
- return false;
-}
-
-/* If manager of the thread is in Non-Secure mode */
-static inline bool _manager_ns(struct pl330_thread *thrd)
-{
- struct pl330_dmac *pl330 = thrd->dmac;
-
- return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
-}
-
-static inline u32 get_id(struct pl330_info *pi, u32 off)
-{
- void __iomem *regs = pi->base;
- u32 id = 0;
-
- id |= (readb(regs + off + 0x0) << 0);
- id |= (readb(regs + off + 0x4) << 8);
- id |= (readb(regs + off + 0x8) << 16);
- id |= (readb(regs + off + 0xc) << 24);
-
- return id;
-}
-
-static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
- enum pl330_dst da, u16 val)
-{
- if (dry_run)
- return SZ_DMAADDH;
-
- buf[0] = CMD_DMAADDH;
- buf[0] |= (da << 1);
- *((u16 *)&buf[1]) = val;
-
- PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
- da == 1 ? "DA" : "SA", val);
-
- return SZ_DMAADDH;
-}
-
-static inline u32 _emit_END(unsigned dry_run, u8 buf[])
-{
- if (dry_run)
- return SZ_DMAEND;
-
- buf[0] = CMD_DMAEND;
-
- PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");
-
- return SZ_DMAEND;
-}
-
-static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
-{
- if (dry_run)
- return SZ_DMAFLUSHP;
-
- buf[0] = CMD_DMAFLUSHP;
-
- peri &= 0x1f;
- peri <<= 3;
- buf[1] = peri;
-
- PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);
-
- return SZ_DMAFLUSHP;
-}
-
-static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
-{
- if (dry_run)
- return SZ_DMALD;
-
- buf[0] = CMD_DMALD;
-
- if (cond == SINGLE)
- buf[0] |= (0 << 1) | (1 << 0);
- else if (cond == BURST)
- buf[0] |= (1 << 1) | (1 << 0);
-
- PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
- cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
-
- return SZ_DMALD;
-}
-
-static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
- enum pl330_cond cond, u8 peri)
-{
- if (dry_run)
- return SZ_DMALDP;
-
- buf[0] = CMD_DMALDP;
-
- if (cond == BURST)
- buf[0] |= (1 << 1);
-
- peri &= 0x1f;
- peri <<= 3;
- buf[1] = peri;
-
- PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
- cond == SINGLE ? 'S' : 'B', peri >> 3);
-
- return SZ_DMALDP;
-}
-
-static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
- unsigned loop, u8 cnt)
-{
- if (dry_run)
- return SZ_DMALP;
-
- buf[0] = CMD_DMALP;
-
- if (loop)
- buf[0] |= (1 << 1);
-
- cnt--; /* DMAC increments by 1 internally */
- buf[1] = cnt;
-
- PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);
-
- return SZ_DMALP;
-}
-
-struct _arg_LPEND {
- enum pl330_cond cond;
- bool forever;
- unsigned loop;
- u8 bjump;
-};
-
-static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
- const struct _arg_LPEND *arg)
-{
- enum pl330_cond cond = arg->cond;
- bool forever = arg->forever;
- unsigned loop = arg->loop;
- u8 bjump = arg->bjump;
-
- if (dry_run)
- return SZ_DMALPEND;
-
- buf[0] = CMD_DMALPEND;
-
- if (loop)
- buf[0] |= (1 << 2);
-
- if (!forever)
- buf[0] |= (1 << 4);
-
- if (cond == SINGLE)
- buf[0] |= (0 << 1) | (1 << 0);
- else if (cond == BURST)
- buf[0] |= (1 << 1) | (1 << 0);
-
- buf[1] = bjump;
-
- PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
- forever ? "FE" : "END",
- cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
- loop ? '1' : '0',
- bjump);
-
- return SZ_DMALPEND;
-}
-
-static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
-{
- if (dry_run)
- return SZ_DMAKILL;
-
- buf[0] = CMD_DMAKILL;
-
- return SZ_DMAKILL;
-}
-
-static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
- enum dmamov_dst dst, u32 val)
-{
- if (dry_run)
- return SZ_DMAMOV;
-
- buf[0] = CMD_DMAMOV;
- buf[1] = dst;
- *((u32 *)&buf[2]) = val;
-
- PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
- dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
-
- return SZ_DMAMOV;
-}
-
-static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
-{
- if (dry_run)
- return SZ_DMANOP;
-
- buf[0] = CMD_DMANOP;
-
- PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");
-
- return SZ_DMANOP;
-}
-
-static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
-{
- if (dry_run)
- return SZ_DMARMB;
-
- buf[0] = CMD_DMARMB;
-
- PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");
-
- return SZ_DMARMB;
-}
-
-static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
-{
- if (dry_run)
- return SZ_DMASEV;
-
- buf[0] = CMD_DMASEV;
-
- ev &= 0x1f;
- ev <<= 3;
- buf[1] = ev;
-
- PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);
-
- return SZ_DMASEV;
-}
-
-static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
-{
- if (dry_run)
- return SZ_DMAST;
-
- buf[0] = CMD_DMAST;
-
- if (cond == SINGLE)
- buf[0] |= (0 << 1) | (1 << 0);
- else if (cond == BURST)
- buf[0] |= (1 << 1) | (1 << 0);
-
- PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
- cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
-
- return SZ_DMAST;
-}
-
-static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
- enum pl330_cond cond, u8 peri)
-{
- if (dry_run)
- return SZ_DMASTP;
-
- buf[0] = CMD_DMASTP;
-
- if (cond == BURST)
- buf[0] |= (1 << 1);
-
- peri &= 0x1f;
- peri <<= 3;
- buf[1] = peri;
-
- PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
- cond == SINGLE ? 'S' : 'B', peri >> 3);
-
- return SZ_DMASTP;
-}
-
-static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
-{
- if (dry_run)
- return SZ_DMASTZ;
-
- buf[0] = CMD_DMASTZ;
-
- PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");
-
- return SZ_DMASTZ;
-}
-
-static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
- unsigned invalidate)
-{
- if (dry_run)
- return SZ_DMAWFE;
-
- buf[0] = CMD_DMAWFE;
-
- ev &= 0x1f;
- ev <<= 3;
- buf[1] = ev;
-
- if (invalidate)
- buf[1] |= (1 << 1);
-
- PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
- ev >> 3, invalidate ? ", I" : "");
-
- return SZ_DMAWFE;
-}
-
-static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
- enum pl330_cond cond, u8 peri)
-{
- if (dry_run)
- return SZ_DMAWFP;
-
- buf[0] = CMD_DMAWFP;
-
- if (cond == SINGLE)
- buf[0] |= (0 << 1) | (0 << 0);
- else if (cond == BURST)
- buf[0] |= (1 << 1) | (0 << 0);
- else
- buf[0] |= (0 << 1) | (1 << 0);
-
- peri &= 0x1f;
- peri <<= 3;
- buf[1] = peri;
-
- PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
- cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
-
- return SZ_DMAWFP;
-}
-
-static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
-{
- if (dry_run)
- return SZ_DMAWMB;
-
- buf[0] = CMD_DMAWMB;
-
- PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
-
- return SZ_DMAWMB;
-}
-
-struct _arg_GO {
- u8 chan;
- u32 addr;
- unsigned ns;
-};
-
-static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
- const struct _arg_GO *arg)
-{
- u8 chan = arg->chan;
- u32 addr = arg->addr;
- unsigned ns = arg->ns;
-
- if (dry_run)
- return SZ_DMAGO;
-
- buf[0] = CMD_DMAGO;
- buf[0] |= (ns << 1);
-
- buf[1] = chan & 0x7;
-
- *((u32 *)&buf[2]) = addr;
-
- return SZ_DMAGO;
-}
-
-#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
-
-/* Returns Time-Out */
-static bool _until_dmac_idle(struct pl330_thread *thrd)
-{
- void __iomem *regs = thrd->dmac->pinfo->base;
- unsigned long loops = msecs_to_loops(5);
-
- do {
- /* Until Manager is Idle */
- if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
- break;
-
- cpu_relax();
- } while (--loops);
-
- if (!loops)
- return true;
-
- return false;
-}
-
-static inline void _execute_DBGINSN(struct pl330_thread *thrd,
- u8 insn[], bool as_manager)
-{
- void __iomem *regs = thrd->dmac->pinfo->base;
- u32 val;
-
- val = (insn[0] << 16) | (insn[1] << 24);
- if (!as_manager) {
- val |= (1 << 0);
- val |= (thrd->id << 8); /* Channel Number */
- }
- writel(val, regs + DBGINST0);
-
- val = *((u32 *)&insn[2]);
- writel(val, regs + DBGINST1);
-
- /* If timed out due to halted state-machine */
- if (_until_dmac_idle(thrd)) {
- dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
- return;
- }
-
- /* Get going */
- writel(0, regs + DBGCMD);
-}
-
-/*
- * Mark a _pl330_req as free.
- * We do it by writing DMAEND as the first instruction
- * because no valid request is going to have DMAEND as
- * its first instruction to execute.
- */
-static void mark_free(struct pl330_thread *thrd, int idx)
-{
- struct _pl330_req *req = &thrd->req[idx];
-
- _emit_END(0, req->mc_cpu);
- req->mc_len = 0;
-
- thrd->req_running = -1;
-}
-
-static inline u32 _state(struct pl330_thread *thrd)
-{
- void __iomem *regs = thrd->dmac->pinfo->base;
- u32 val;
-
- if (is_manager(thrd))
- val = readl(regs + DS) & 0xf;
- else
- val = readl(regs + CS(thrd->id)) & 0xf;
-
- switch (val) {
- case DS_ST_STOP:
- return PL330_STATE_STOPPED;
- case DS_ST_EXEC:
- return PL330_STATE_EXECUTING;
- case DS_ST_CMISS:
- return PL330_STATE_CACHEMISS;
- case DS_ST_UPDTPC:
- return PL330_STATE_UPDTPC;
- case DS_ST_WFE:
- return PL330_STATE_WFE;
- case DS_ST_FAULT:
- return PL330_STATE_FAULTING;
- case DS_ST_ATBRR:
- if (is_manager(thrd))
- return PL330_STATE_INVALID;
- else
- return PL330_STATE_ATBARRIER;
- case DS_ST_QBUSY:
- if (is_manager(thrd))
- return PL330_STATE_INVALID;
- else
- return PL330_STATE_QUEUEBUSY;
- case DS_ST_WFP:
- if (is_manager(thrd))
- return PL330_STATE_INVALID;
- else
- return PL330_STATE_WFP;
- case DS_ST_KILL:
- if (is_manager(thrd))
- return PL330_STATE_INVALID;
- else
- return PL330_STATE_KILLING;
- case DS_ST_CMPLT:
- if (is_manager(thrd))
- return PL330_STATE_INVALID;
- else
- return PL330_STATE_COMPLETING;
- case DS_ST_FLTCMP:
- if (is_manager(thrd))
- return PL330_STATE_INVALID;
- else
- return PL330_STATE_FAULT_COMPLETING;
- default:
- return PL330_STATE_INVALID;
- }
-}
-
-static void _stop(struct pl330_thread *thrd)
-{
- void __iomem *regs = thrd->dmac->pinfo->base;
- u8 insn[6] = {0, 0, 0, 0, 0, 0};
-
- if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
- UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
-
- /* Return if nothing needs to be done */
- if (_state(thrd) == PL330_STATE_COMPLETING
- || _state(thrd) == PL330_STATE_KILLING
- || _state(thrd) == PL330_STATE_STOPPED)
- return;
-
- _emit_KILL(0, insn);
-
- /* Stop generating interrupts for SEV */
- writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
-
- _execute_DBGINSN(thrd, insn, is_manager(thrd));
-}
-
-/* Start doing req 'idx' of thread 'thrd' */
-static bool _trigger(struct pl330_thread *thrd)
-{
- void __iomem *regs = thrd->dmac->pinfo->base;
- struct _pl330_req *req;
- struct pl330_req *r;
- struct _arg_GO go;
- unsigned ns;
- u8 insn[6] = {0, 0, 0, 0, 0, 0};
- int idx;
-
- /* Return if already ACTIVE */
- if (_state(thrd) != PL330_STATE_STOPPED)
- return true;
-
- idx = 1 - thrd->lstenq;
- if (!IS_FREE(&thrd->req[idx]))
- req = &thrd->req[idx];
- else {
- idx = thrd->lstenq;
- if (!IS_FREE(&thrd->req[idx]))
- req = &thrd->req[idx];
- else
- req = NULL;
- }
-
- /* Return if no request */
- if (!req || !req->r)
- return true;
-
- r = req->r;
-
- if (r->cfg)
- ns = r->cfg->nonsecure ? 1 : 0;
- else if (readl(regs + CS(thrd->id)) & CS_CNS)
- ns = 1;
- else
- ns = 0;
-
- /* See 'Abort Sources' point-4 at Page 2-25 */
- if (_manager_ns(thrd) && !ns)
- dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
- __func__, __LINE__);
-
- go.chan = thrd->id;
- go.addr = req->mc_bus;
- go.ns = ns;
- _emit_GO(0, insn, &go);
-
- /* Set to generate interrupts for SEV */
- writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);
-
- /* Only manager can execute GO */
- _execute_DBGINSN(thrd, insn, true);
-
- thrd->req_running = idx;
-
- return true;
-}
-
-static bool _start(struct pl330_thread *thrd)
-{
- switch (_state(thrd)) {
- case PL330_STATE_FAULT_COMPLETING:
- UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
-
- if (_state(thrd) == PL330_STATE_KILLING)
- UNTIL(thrd, PL330_STATE_STOPPED)
-
- case PL330_STATE_FAULTING:
- _stop(thrd);
-
- case PL330_STATE_KILLING:
- case PL330_STATE_COMPLETING:
- UNTIL(thrd, PL330_STATE_STOPPED)
-
- case PL330_STATE_STOPPED:
- return _trigger(thrd);
-
- case PL330_STATE_WFP:
- case PL330_STATE_QUEUEBUSY:
- case PL330_STATE_ATBARRIER:
- case PL330_STATE_UPDTPC:
- case PL330_STATE_CACHEMISS:
- case PL330_STATE_EXECUTING:
- return true;
-
- case PL330_STATE_WFE: /* For RESUME, nothing yet */
- default:
- return false;
- }
-}
-
-static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs, int cyc)
-{
- int off = 0;
-
- while (cyc--) {
- off += _emit_LD(dry_run, &buf[off], ALWAYS);
- off += _emit_RMB(dry_run, &buf[off]);
- off += _emit_ST(dry_run, &buf[off], ALWAYS);
- off += _emit_WMB(dry_run, &buf[off]);
- }
-
- return off;
-}
-
-static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs, int cyc)
-{
- int off = 0;
-
- while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
- off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
- off += _emit_ST(dry_run, &buf[off], ALWAYS);
- off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
- }
-
- return off;
-}
-
-static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs, int cyc)
-{
- int off = 0;
-
- while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
- off += _emit_LD(dry_run, &buf[off], ALWAYS);
- off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
- off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
- }
-
- return off;
-}
-
-static int _bursts(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs, int cyc)
-{
- int off = 0;
-
- switch (pxs->r->rqtype) {
- case MEMTODEV:
- off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
- break;
- case DEVTOMEM:
- off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
- break;
- case MEMTOMEM:
- off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
- break;
- default:
- off += 0x40000000; /* Scare off the Client */
- break;
- }
-
- return off;
-}
-
-/* Returns bytes consumed and updates bursts */
-static inline int _loop(unsigned dry_run, u8 buf[],
- unsigned long *bursts, const struct _xfer_spec *pxs)
-{
- int cyc, cycmax, szlp, szlpend, szbrst, off;
- unsigned lcnt0, lcnt1, ljmp0, ljmp1;
- struct _arg_LPEND lpend;
-
- /* Max iterations possible in DMALP is 256 */
- if (*bursts >= 256*256) {
- lcnt1 = 256;
- lcnt0 = 256;
- cyc = *bursts / lcnt1 / lcnt0;
- } else if (*bursts > 256) {
- lcnt1 = 256;
- lcnt0 = *bursts / lcnt1;
- cyc = 1;
- } else {
- lcnt1 = *bursts;
- lcnt0 = 0;
- cyc = 1;
- }
-
- szlp = _emit_LP(1, buf, 0, 0);
- szbrst = _bursts(1, buf, pxs, 1);
-
- lpend.cond = ALWAYS;
- lpend.forever = false;
- lpend.loop = 0;
- lpend.bjump = 0;
- szlpend = _emit_LPEND(1, buf, &lpend);
-
- if (lcnt0) {
- szlp *= 2;
- szlpend *= 2;
- }
-
- /*
- * Max bursts that we can unroll due to limit on the
- * size of backward jump that can be encoded in DMALPEND
- * which is 8-bits and hence 255
- */
- cycmax = (255 - (szlp + szlpend)) / szbrst;
-
- cyc = (cycmax < cyc) ? cycmax : cyc;
-
- off = 0;
-
- if (lcnt0) {
- off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
- ljmp0 = off;
- }
-
- off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
- ljmp1 = off;
-
- off += _bursts(dry_run, &buf[off], pxs, cyc);
-
- lpend.cond = ALWAYS;
- lpend.forever = false;
- lpend.loop = 1;
- lpend.bjump = off - ljmp1;
- off += _emit_LPEND(dry_run, &buf[off], &lpend);
-
- if (lcnt0) {
- lpend.cond = ALWAYS;
- lpend.forever = false;
- lpend.loop = 0;
- lpend.bjump = off - ljmp0;
- off += _emit_LPEND(dry_run, &buf[off], &lpend);
- }
-
- *bursts = lcnt1 * cyc;
- if (lcnt0)
- *bursts *= lcnt0;
-
- return off;
-}
-
-static inline int _setup_loops(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs)
-{
- struct pl330_xfer *x = pxs->x;
- u32 ccr = pxs->ccr;
- unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
- int off = 0;
-
- while (bursts) {
- c = bursts;
- off += _loop(dry_run, &buf[off], &c, pxs);
- bursts -= c;
- }
-
- return off;
-}
-
-static inline int _setup_xfer(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs)
-{
- struct pl330_xfer *x = pxs->x;
- int off = 0;
-
- /* DMAMOV SAR, x->src_addr */
- off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
- /* DMAMOV DAR, x->dst_addr */
- off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
-
- /* Setup Loop(s) */
- off += _setup_loops(dry_run, &buf[off], pxs);
-
- return off;
-}
-
-/*
- * A req is a sequence of one or more xfer units.
- * Returns the number of bytes taken to setup the MC for the req.
- */
-static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
- unsigned index, struct _xfer_spec *pxs)
-{
- struct _pl330_req *req = &thrd->req[index];
- struct pl330_xfer *x;
- u8 *buf = req->mc_cpu;
- int off = 0;
-
- PL330_DBGMC_START(req->mc_bus);
-
- /* DMAMOV CCR, ccr */
- off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
-
- x = pxs->r->x;
- do {
- /* Error if xfer length is not aligned at burst size */
- if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
- return -EINVAL;
-
- pxs->x = x;
- off += _setup_xfer(dry_run, &buf[off], pxs);
-
- x = x->next;
- } while (x);
-
- /* DMASEV peripheral/event */
- off += _emit_SEV(dry_run, &buf[off], thrd->ev);
- /* DMAEND */
- off += _emit_END(dry_run, &buf[off]);
-
- return off;
-}
-
-static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
-{
- u32 ccr = 0;
-
- if (rqc->src_inc)
- ccr |= CC_SRCINC;
-
- if (rqc->dst_inc)
- ccr |= CC_DSTINC;
-
- /* We set same protection levels for Src and DST for now */
- if (rqc->privileged)
- ccr |= CC_SRCPRI | CC_DSTPRI;
- if (rqc->nonsecure)
- ccr |= CC_SRCNS | CC_DSTNS;
- if (rqc->insnaccess)
- ccr |= CC_SRCIA | CC_DSTIA;
-
- ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
- ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);
-
- ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
- ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
-
- ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
- ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
-
- ccr |= (rqc->swap << CC_SWAP_SHFT);
-
- return ccr;
-}
-
-static inline bool _is_valid(u32 ccr)
-{
- enum pl330_dstcachectrl dcctl;
- enum pl330_srccachectrl scctl;
-
- dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
- scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;
-
- if (dcctl == DINVALID1 || dcctl == DINVALID2
- || scctl == SINVALID1 || scctl == SINVALID2)
- return false;
- else
- return true;
-}
-
-/*
- * Submit a list of xfers after which the client wants notification.
- * Client is not notified after each xfer unit, just once after all
- * xfer units are done or some error occurs.
- */
-int pl330_submit_req(void *ch_id, struct pl330_req *r)
-{
- struct pl330_thread *thrd = ch_id;
- struct pl330_dmac *pl330;
- struct pl330_info *pi;
- struct _xfer_spec xs;
- unsigned long flags;
- void __iomem *regs;
- unsigned idx;
- u32 ccr;
- int ret = 0;
-
- /* No Req or Unacquired Channel or DMAC */
- if (!r || !thrd || thrd->free)
- return -EINVAL;
-
- pl330 = thrd->dmac;
- pi = pl330->pinfo;
- regs = pi->base;
-
- if (pl330->state == DYING
- || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
- dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
- __func__, __LINE__);
- return -EAGAIN;
- }
-
- /* If request for non-existing peripheral */
- if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
- dev_info(thrd->dmac->pinfo->dev,
- "%s:%d Invalid peripheral(%u)!\n",
- __func__, __LINE__, r->peri);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&pl330->lock, flags);
-
- if (_queue_full(thrd)) {
- ret = -EAGAIN;
- goto xfer_exit;
- }
-
- /* Prefer Secure Channel */
- if (!_manager_ns(thrd))
- r->cfg->nonsecure = 0;
- else
- r->cfg->nonsecure = 1;
-
- /* Use last settings, if not provided */
- if (r->cfg)
- ccr = _prepare_ccr(r->cfg);
- else
- ccr = readl(regs + CC(thrd->id));
-
- /* If this req doesn't have valid xfer settings */
- if (!_is_valid(ccr)) {
- ret = -EINVAL;
- dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
- __func__, __LINE__, ccr);
- goto xfer_exit;
- }
-
- idx = IS_FREE(&thrd->req[0]) ? 0 : 1;
-
- xs.ccr = ccr;
- xs.r = r;
-
- /* First dry run to check if req is acceptable */
- ret = _setup_req(1, thrd, idx, &xs);
- if (ret < 0)
- goto xfer_exit;
-
- if (ret > pi->mcbufsz / 2) {
- dev_info(thrd->dmac->pinfo->dev,
- "%s:%d Trying increasing mcbufsz\n",
- __func__, __LINE__);
- ret = -ENOMEM;
- goto xfer_exit;
- }
-
- /* Hook the request */
- thrd->lstenq = idx;
- thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
- thrd->req[idx].r = r;
-
- ret = 0;
-
-xfer_exit:
- spin_unlock_irqrestore(&pl330->lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(pl330_submit_req);
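
An aside, not part of the patch: a minimal sketch of how a client of this (now removed) API would chain two xfer units into one request, matching the "one notification per request" semantics described above. The callback, helper name, addresses and sizes are hypothetical; the field names come from the pl330_req/pl330_xfer/pl330_reqcfg definitions in the header removed further down.

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/hardware/pl330.h>         /* the header being removed below */

static void my_done(void *token, enum pl330_op_err err)
{
        pr_info("req %p finished, err=%d\n", token, err);
}

static int queue_two_chunks(void *ch_id, dma_addr_t src, dma_addr_t dst)
{
        static struct pl330_xfer x[2];
        static struct pl330_reqcfg cfg = {
                .src_inc   = 1,
                .dst_inc   = 1,
                .brst_size = 2,         /* 1 << 2 = 4-byte beats */
                .brst_len  = 16,
        };
        static struct pl330_req req;

        x[0].src_addr = src;
        x[0].dst_addr = dst;
        x[0].bytes    = 0x1000;         /* must be a multiple of burst size * len */
        x[0].next     = &x[1];

        x[1].src_addr = src + 0x1000;
        x[1].dst_addr = dst + 0x1000;
        x[1].bytes    = 0x1000;
        x[1].next     = NULL;           /* last xfer terminates the list */

        req.rqtype  = MEMTOMEM;
        req.cfg     = &cfg;
        req.x       = &x[0];
        req.token   = &req;
        req.xfer_cb = my_done;

        /* One callback after both units complete (or on error) */
        return pl330_submit_req(ch_id, &req);
}
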
-
-static void pl330_dotask(unsigned long data)
-{
- struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
- struct pl330_info *pi = pl330->pinfo;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&pl330->lock, flags);
-
- /* The DMAC itself gone nuts */
- if (pl330->dmac_tbd.reset_dmac) {
- pl330->state = DYING;
- /* Reset the manager too */
- pl330->dmac_tbd.reset_mngr = true;
- /* Clear the reset flag */
- pl330->dmac_tbd.reset_dmac = false;
- }
-
- if (pl330->dmac_tbd.reset_mngr) {
- _stop(pl330->manager);
- /* Reset all channels */
- pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
- /* Clear the reset flag */
- pl330->dmac_tbd.reset_mngr = false;
- }
-
- for (i = 0; i < pi->pcfg.num_chan; i++) {
-
- if (pl330->dmac_tbd.reset_chan & (1 << i)) {
- struct pl330_thread *thrd = &pl330->channels[i];
- void __iomem *regs = pi->base;
- enum pl330_op_err err;
-
- _stop(thrd);
-
- if (readl(regs + FSC) & (1 << thrd->id))
- err = PL330_ERR_FAIL;
- else
- err = PL330_ERR_ABORT;
-
- spin_unlock_irqrestore(&pl330->lock, flags);
-
- _callback(thrd->req[1 - thrd->lstenq].r, err);
- _callback(thrd->req[thrd->lstenq].r, err);
-
- spin_lock_irqsave(&pl330->lock, flags);
-
- thrd->req[0].r = NULL;
- thrd->req[1].r = NULL;
- mark_free(thrd, 0);
- mark_free(thrd, 1);
-
- /* Clear the reset flag */
- pl330->dmac_tbd.reset_chan &= ~(1 << i);
- }
- }
-
- spin_unlock_irqrestore(&pl330->lock, flags);
-
- return;
-}
-
-/* Returns 1 if state was updated, 0 otherwise */
-int pl330_update(const struct pl330_info *pi)
-{
- struct _pl330_req *rqdone;
- struct pl330_dmac *pl330;
- unsigned long flags;
- void __iomem *regs;
- u32 val;
- int id, ev, ret = 0;
-
- if (!pi || !pi->pl330_data)
- return 0;
-
- regs = pi->base;
- pl330 = pi->pl330_data;
-
- spin_lock_irqsave(&pl330->lock, flags);
-
- val = readl(regs + FSM) & 0x1;
- if (val)
- pl330->dmac_tbd.reset_mngr = true;
- else
- pl330->dmac_tbd.reset_mngr = false;
-
- val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
- pl330->dmac_tbd.reset_chan |= val;
- if (val) {
- int i = 0;
- while (i < pi->pcfg.num_chan) {
- if (val & (1 << i)) {
- dev_info(pi->dev,
- "Reset Channel-%d\t CS-%x FTC-%x\n",
- i, readl(regs + CS(i)),
- readl(regs + FTC(i)));
- _stop(&pl330->channels[i]);
- }
- i++;
- }
- }
-
- /* Check which event happened, i.e., thread notified */
- val = readl(regs + ES);
- if (pi->pcfg.num_events < 32
- && val & ~((1 << pi->pcfg.num_events) - 1)) {
- pl330->dmac_tbd.reset_dmac = true;
- dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
- ret = 1;
- goto updt_exit;
- }
-
- for (ev = 0; ev < pi->pcfg.num_events; ev++) {
- if (val & (1 << ev)) { /* Event occurred */
- struct pl330_thread *thrd;
- u32 inten = readl(regs + INTEN);
- int active;
-
- /* Clear the event */
- if (inten & (1 << ev))
- writel(1 << ev, regs + INTCLR);
-
- ret = 1;
-
- id = pl330->events[ev];
-
- thrd = &pl330->channels[id];
-
- active = thrd->req_running;
- if (active == -1) /* Aborted */
- continue;
-
- rqdone = &thrd->req[active];
- mark_free(thrd, active);
-
- /* Get going again ASAP */
- _start(thrd);
-
- /* For now, just make a list of callbacks to be done */
- list_add_tail(&rqdone->rqd, &pl330->req_done);
- }
- }
-
- /* Now that we are in no hurry, do the callbacks */
- while (!list_empty(&pl330->req_done)) {
- struct pl330_req *r;
-
- rqdone = container_of(pl330->req_done.next,
- struct _pl330_req, rqd);
-
- list_del_init(&rqdone->rqd);
-
- /* Detach the req */
- r = rqdone->r;
- rqdone->r = NULL;
-
- spin_unlock_irqrestore(&pl330->lock, flags);
- _callback(r, PL330_ERR_NONE);
- spin_lock_irqsave(&pl330->lock, flags);
- }
-
-updt_exit:
- spin_unlock_irqrestore(&pl330->lock, flags);
-
- if (pl330->dmac_tbd.reset_dmac
- || pl330->dmac_tbd.reset_mngr
- || pl330->dmac_tbd.reset_chan) {
- ret = 1;
- tasklet_schedule(&pl330->tasks);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(pl330_update);
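
An aside, not part of the patch: pl330_update() is meant to be driven from the DMAC interrupt. A minimal sketch of the handler a client would register, assuming the pl330_info pointer is passed as the IRQ cookie; the handler name is hypothetical.

#include <linux/interrupt.h>
#include <asm/hardware/pl330.h>

static irqreturn_t my_pl330_irq(int irq, void *data)
{
        const struct pl330_info *pi = data;

        /* A non-zero return means some channel/event state changed */
        if (pl330_update(pi))
                return IRQ_HANDLED;

        return IRQ_NONE;
}
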
-
-int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
-{
- struct pl330_thread *thrd = ch_id;
- struct pl330_dmac *pl330;
- unsigned long flags;
- int ret = 0, active;
-
- if (!thrd || thrd->free || thrd->dmac->state == DYING)
- return -EINVAL;
-
- pl330 = thrd->dmac;
- active = thrd->req_running;
-
- spin_lock_irqsave(&pl330->lock, flags);
-
- switch (op) {
- case PL330_OP_FLUSH:
- /* Make sure the channel is stopped */
- _stop(thrd);
-
- thrd->req[0].r = NULL;
- thrd->req[1].r = NULL;
- mark_free(thrd, 0);
- mark_free(thrd, 1);
- break;
-
- case PL330_OP_ABORT:
- /* Make sure the channel is stopped */
- _stop(thrd);
-
- /* ABORT is only for the active req */
- if (active == -1)
- break;
-
- thrd->req[active].r = NULL;
- mark_free(thrd, active);
-
- /* Start the next */
- case PL330_OP_START:
- if ((active == -1) && !_start(thrd))
- ret = -EIO;
- break;
-
- default:
- ret = -EINVAL;
- }
-
- spin_unlock_irqrestore(&pl330->lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(pl330_chan_ctrl);
-
-int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
-{
- struct pl330_thread *thrd = ch_id;
- struct pl330_dmac *pl330;
- struct pl330_info *pi;
- void __iomem *regs;
- int active;
- u32 val;
-
- if (!pstatus || !thrd || thrd->free)
- return -EINVAL;
-
- pl330 = thrd->dmac;
- pi = pl330->pinfo;
- regs = pi->base;
-
- /* The client should remove the DMAC and add again */
- if (pl330->state == DYING)
- pstatus->dmac_halted = true;
- else
- pstatus->dmac_halted = false;
-
- val = readl(regs + FSC);
- if (val & (1 << thrd->id))
- pstatus->faulting = true;
- else
- pstatus->faulting = false;
-
- active = thrd->req_running;
-
- if (active == -1) {
- /* Indicate that the thread is not running */
- pstatus->top_req = NULL;
- pstatus->wait_req = NULL;
- } else {
- pstatus->top_req = thrd->req[active].r;
- pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
- ? thrd->req[1 - active].r : NULL;
- }
-
- pstatus->src_addr = readl(regs + SA(thrd->id));
- pstatus->dst_addr = readl(regs + DA(thrd->id));
-
- return 0;
-}
-EXPORT_SYMBOL(pl330_chan_status);
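
An aside, not part of the patch: a minimal sketch of a client using pl330_chan_status() together with pl330_chan_ctrl() to recover a faulting channel, along the lines suggested by the pl330_chanstatus comments in the header removed below. The helper name is hypothetical.

#include <linux/errno.h>
#include <asm/hardware/pl330.h>

static int recover_if_faulting(void *ch_id)
{
        struct pl330_chanstatus st;
        int ret;

        ret = pl330_chan_status(ch_id, &st);
        if (ret)
                return ret;

        /* Whole DMAC halted: only a remove/re-add of the DMAC helps */
        if (st.dmac_halted)
                return -EIO;

        if (st.faulting) {
                /* Drop queued reqs; the client must resubmit and START */
                ret = pl330_chan_ctrl(ch_id, PL330_OP_FLUSH);
        }

        return ret;
}
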
-
-/* Reserve an event */
-static inline int _alloc_event(struct pl330_thread *thrd)
-{
- struct pl330_dmac *pl330 = thrd->dmac;
- struct pl330_info *pi = pl330->pinfo;
- int ev;
-
- for (ev = 0; ev < pi->pcfg.num_events; ev++)
- if (pl330->events[ev] == -1) {
- pl330->events[ev] = thrd->id;
- return ev;
- }
-
- return -1;
-}
-
-static bool _chan_ns(const struct pl330_info *pi, int i)
-{
- return pi->pcfg.irq_ns & (1 << i);
-}
-
-/* Upon success, returns IdentityToken for the
- * allocated channel, NULL otherwise.
- */
-void *pl330_request_channel(const struct pl330_info *pi)
-{
- struct pl330_thread *thrd = NULL;
- struct pl330_dmac *pl330;
- unsigned long flags;
- int chans, i;
-
- if (!pi || !pi->pl330_data)
- return NULL;
-
- pl330 = pi->pl330_data;
-
- if (pl330->state == DYING)
- return NULL;
-
- chans = pi->pcfg.num_chan;
-
- spin_lock_irqsave(&pl330->lock, flags);
-
- for (i = 0; i < chans; i++) {
- thrd = &pl330->channels[i];
- if ((thrd->free) && (!_manager_ns(thrd) ||
- _chan_ns(pi, i))) {
- thrd->ev = _alloc_event(thrd);
- if (thrd->ev >= 0) {
- thrd->free = false;
- thrd->lstenq = 1;
- thrd->req[0].r = NULL;
- mark_free(thrd, 0);
- thrd->req[1].r = NULL;
- mark_free(thrd, 1);
- break;
- }
- }
- thrd = NULL;
- }
-
- spin_unlock_irqrestore(&pl330->lock, flags);
-
- return thrd;
-}
-EXPORT_SYMBOL(pl330_request_channel);
-
-/* Release an event */
-static inline void _free_event(struct pl330_thread *thrd, int ev)
-{
- struct pl330_dmac *pl330 = thrd->dmac;
- struct pl330_info *pi = pl330->pinfo;
-
- /* If the event is valid and was held by the thread */
- if (ev >= 0 && ev < pi->pcfg.num_events
- && pl330->events[ev] == thrd->id)
- pl330->events[ev] = -1;
-}
-
-void pl330_release_channel(void *ch_id)
-{
- struct pl330_thread *thrd = ch_id;
- struct pl330_dmac *pl330;
- unsigned long flags;
-
- if (!thrd || thrd->free)
- return;
-
- _stop(thrd);
-
- _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
- _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);
-
- pl330 = thrd->dmac;
-
- spin_lock_irqsave(&pl330->lock, flags);
- _free_event(thrd, thrd->ev);
- thrd->free = true;
- spin_unlock_irqrestore(&pl330->lock, flags);
-}
-EXPORT_SYMBOL(pl330_release_channel);
-
-/* Initialize the structure for PL330 configuration, which can be used
- * by the client driver to make best use of the DMAC
- */
-static void read_dmac_config(struct pl330_info *pi)
-{
- void __iomem *regs = pi->base;
- u32 val;
-
- val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
- val &= CRD_DATA_WIDTH_MASK;
- pi->pcfg.data_bus_width = 8 * (1 << val);
-
- val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
- val &= CRD_DATA_BUFF_MASK;
- pi->pcfg.data_buf_dep = val + 1;
-
- val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
- val &= CR0_NUM_CHANS_MASK;
- val += 1;
- pi->pcfg.num_chan = val;
-
- val = readl(regs + CR0);
- if (val & CR0_PERIPH_REQ_SET) {
- val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
- val += 1;
- pi->pcfg.num_peri = val;
- pi->pcfg.peri_ns = readl(regs + CR4);
- } else {
- pi->pcfg.num_peri = 0;
- }
-
- val = readl(regs + CR0);
- if (val & CR0_BOOT_MAN_NS)
- pi->pcfg.mode |= DMAC_MODE_NS;
- else
- pi->pcfg.mode &= ~DMAC_MODE_NS;
-
- val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
- val &= CR0_NUM_EVENTS_MASK;
- val += 1;
- pi->pcfg.num_events = val;
-
- pi->pcfg.irq_ns = readl(regs + CR3);
-
- pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
- pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
-}
-
-static inline void _reset_thread(struct pl330_thread *thrd)
-{
- struct pl330_dmac *pl330 = thrd->dmac;
- struct pl330_info *pi = pl330->pinfo;
-
- thrd->req[0].mc_cpu = pl330->mcode_cpu
- + (thrd->id * pi->mcbufsz);
- thrd->req[0].mc_bus = pl330->mcode_bus
- + (thrd->id * pi->mcbufsz);
- thrd->req[0].r = NULL;
- mark_free(thrd, 0);
-
- thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
- + pi->mcbufsz / 2;
- thrd->req[1].mc_bus = thrd->req[0].mc_bus
- + pi->mcbufsz / 2;
- thrd->req[1].r = NULL;
- mark_free(thrd, 1);
-}
-
-static int dmac_alloc_threads(struct pl330_dmac *pl330)
-{
- struct pl330_info *pi = pl330->pinfo;
- int chans = pi->pcfg.num_chan;
- struct pl330_thread *thrd;
- int i;
-
- /* Allocate 1 Manager and 'chans' Channel threads */
- pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
- GFP_KERNEL);
- if (!pl330->channels)
- return -ENOMEM;
-
- /* Init Channel threads */
- for (i = 0; i < chans; i++) {
- thrd = &pl330->channels[i];
- thrd->id = i;
- thrd->dmac = pl330;
- _reset_thread(thrd);
- thrd->free = true;
- }
-
- /* MANAGER is indexed at the end */
- thrd = &pl330->channels[chans];
- thrd->id = chans;
- thrd->dmac = pl330;
- thrd->free = false;
- pl330->manager = thrd;
-
- return 0;
-}
-
-static int dmac_alloc_resources(struct pl330_dmac *pl330)
-{
- struct pl330_info *pi = pl330->pinfo;
- int chans = pi->pcfg.num_chan;
- int ret;
-
- /*
- * Alloc MicroCode buffer for 'chans' Channel threads.
- * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
- */
- pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
- chans * pi->mcbufsz,
- &pl330->mcode_bus, GFP_KERNEL);
- if (!pl330->mcode_cpu) {
- dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- ret = dmac_alloc_threads(pl330);
- if (ret) {
- dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n",
- __func__, __LINE__);
- dma_free_coherent(pi->dev,
- chans * pi->mcbufsz,
- pl330->mcode_cpu, pl330->mcode_bus);
- return ret;
- }
-
- return 0;
-}
-
-int pl330_add(struct pl330_info *pi)
-{
- struct pl330_dmac *pl330;
- void __iomem *regs;
- int i, ret;
-
- if (!pi || !pi->dev)
- return -EINVAL;
-
- /* If already added */
- if (pi->pl330_data)
- return -EINVAL;
-
- /*
- * If the SoC can perform reset on the DMAC, then do it
- * before reading its configuration.
- */
- if (pi->dmac_reset)
- pi->dmac_reset(pi);
-
- regs = pi->base;
-
- /* Check if we can handle this DMAC */
- if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
- || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
- dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
- get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
- return -EINVAL;
- }
-
- /* Read the configuration of the DMAC */
- read_dmac_config(pi);
-
- if (pi->pcfg.num_events == 0) {
- dev_err(pi->dev, "%s:%d Can't work without events!\n",
- __func__, __LINE__);
- return -EINVAL;
- }
-
- pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
- if (!pl330) {
- dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- /* Assign the info structure and private data */
- pl330->pinfo = pi;
- pi->pl330_data = pl330;
-
- spin_lock_init(&pl330->lock);
-
- INIT_LIST_HEAD(&pl330->req_done);
-
- /* Use default MC buffer size if not provided */
- if (!pi->mcbufsz)
- pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;
-
- /* Mark all events as free */
- for (i = 0; i < pi->pcfg.num_events; i++)
- pl330->events[i] = -1;
-
- /* Allocate resources needed by the DMAC */
- ret = dmac_alloc_resources(pl330);
- if (ret) {
- dev_err(pi->dev, "Unable to create channels for DMAC\n");
- kfree(pl330);
- return ret;
- }
-
- tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
-
- pl330->state = INIT;
-
- return 0;
-}
-EXPORT_SYMBOL(pl330_add);
-
-static int dmac_free_threads(struct pl330_dmac *pl330)
-{
- struct pl330_info *pi = pl330->pinfo;
- int chans = pi->pcfg.num_chan;
- struct pl330_thread *thrd;
- int i;
-
- /* Release Channel threads */
- for (i = 0; i < chans; i++) {
- thrd = &pl330->channels[i];
- pl330_release_channel((void *)thrd);
- }
-
- /* Free memory */
- kfree(pl330->channels);
-
- return 0;
-}
-
-static void dmac_free_resources(struct pl330_dmac *pl330)
-{
- struct pl330_info *pi = pl330->pinfo;
- int chans = pi->pcfg.num_chan;
-
- dmac_free_threads(pl330);
-
- dma_free_coherent(pi->dev, chans * pi->mcbufsz,
- pl330->mcode_cpu, pl330->mcode_bus);
-}
-
-void pl330_del(struct pl330_info *pi)
-{
- struct pl330_dmac *pl330;
-
- if (!pi || !pi->pl330_data)
- return;
-
- pl330 = pi->pl330_data;
-
- pl330->state = UNINIT;
-
- tasklet_kill(&pl330->tasks);
-
- /* Free DMAC resources */
- dmac_free_resources(pl330);
-
- kfree(pl330);
- pi->pl330_data = NULL;
-}
-EXPORT_SYMBOL(pl330_del);
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index 1103f62a1964..a8314c3ee84d 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -57,18 +57,24 @@ CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_NET_PCI=y
CONFIG_E100=y
+CONFIG_SMC91X=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIAL_AMBA_PL010=y
CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_ARMCLCD=y
CONFIG_FB_MATROX=y
CONFIG_FB_MATROX_MILLENIUM=y
CONFIG_FB_MATROX_MYSTIQUE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PL030=y
CONFIG_EXT2_FS=y
+CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
@@ -78,5 +84,7 @@ CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
CONFIG_PARTITION_ADVANCED=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 23371b17b23e..03fb93621d0d 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -23,6 +23,8 @@
#include <asm/ptrace.h>
#include <asm/domain.h>
+#define IOMEM(x) (x)
+
/*
* Endian independent macros for shifting bytes within registers.
*/
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
new file mode 100644
index 000000000000..2fca60ab513a
--- /dev/null
+++ b/arch/arm/include/asm/cpuidle.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_ARM_CPUIDLE_H
+#define __ASM_ARM_CPUIDLE_H
+
+#ifdef CONFIG_CPU_IDLE
+extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index);
+#else
+static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#endif
+
+/* Common ARM WFI state */
+#define ARM_CPUIDLE_WFI_STATE_PWR(p) {\
+ .enter = arm_cpuidle_simple_enter,\
+ .exit_latency = 1,\
+ .target_residency = 1,\
+ .power_usage = p,\
+ .flags = CPUIDLE_FLAG_TIME_VALID,\
+ .name = "WFI",\
+ .desc = "ARM WFI",\
+}
+
+/*
+ * in case power_specified == 1, give a default WFI power value needed
+ * by some governors
+ */
+#define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX)
+
+#endif
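
An aside, not part of the patch: a minimal sketch of how a platform cpuidle driver would consume the new common WFI state. The driver name is hypothetical and only the state table is shown; registration follows the usual cpuidle_register_driver()/device pattern of this kernel version.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpuidle.h>
#include <asm/cpuidle.h>

static struct cpuidle_driver myplat_idle_driver = {
        .name           = "myplat_idle",
        .owner          = THIS_MODULE,
        .states[0]      = ARM_CPUIDLE_WFI_STATE,
        .state_count    = 1,
};
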
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 0e9ce8d9686e..38050b1c4800 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -130,8 +130,4 @@ struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
-extern int vectors_user_mapping(void);
-#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
-#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
-
#endif
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 7df239bcdf27..c4c87bc12231 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -103,11 +103,11 @@
#define L2X0_ADDR_FILTER_EN 1
#ifndef __ASSEMBLY__
-extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
+extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
#if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
-extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask);
+extern int l2x0_of_init(u32 aux_val, u32 aux_mask);
#else
-static inline int l2x0_of_init(__u32 aux_val, __u32 aux_mask)
+static inline int l2x0_of_init(u32 aux_val, u32 aux_mask)
{
return -ENODEV;
}
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
index 59b8c3892f76..122f86d8c991 100644
--- a/arch/arm/include/asm/hardware/iop_adma.h
+++ b/arch/arm/include/asm/hardware/iop_adma.h
@@ -49,7 +49,6 @@ struct iop_adma_device {
/**
* struct iop_adma_chan - internal representation of an ADMA device
* @pending: allows batching of hardware operations
- * @completed_cookie: identifier for the most recently completed operation
* @lock: serializes enqueue/dequeue operations to the slot pool
* @mmr_base: memory mapped register base
* @chain: device chain view of the descriptors
@@ -62,7 +61,6 @@ struct iop_adma_device {
*/
struct iop_adma_chan {
int pending;
- dma_cookie_t completed_cookie;
spinlock_t lock; /* protects the descriptor slot pool */
void __iomem *mmr_base;
struct list_head chain;
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 43cab498bc27..73f84fa4f366 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -9,6 +9,9 @@
#ifndef __ASM_HARDWARE_IT8152_H
#define __ASM_HARDWARE_IT8152_H
+
+#include <mach/irqs.h>
+
extern void __iomem *it8152_base_address;
#define IT8152_IO_BASE (it8152_base_address + 0x03e00000)
diff --git a/arch/arm/include/asm/hardware/pl330.h b/arch/arm/include/asm/hardware/pl330.h
deleted file mode 100644
index c1821385abfa..000000000000
--- a/arch/arm/include/asm/hardware/pl330.h
+++ /dev/null
@@ -1,217 +0,0 @@
-/* linux/include/asm/hardware/pl330.h
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __PL330_CORE_H
-#define __PL330_CORE_H
-
-#define PL330_MAX_CHAN 8
-#define PL330_MAX_IRQS 32
-#define PL330_MAX_PERI 32
-
-enum pl330_srccachectrl {
- SCCTRL0 = 0, /* Noncacheable and nonbufferable */
- SCCTRL1, /* Bufferable only */
- SCCTRL2, /* Cacheable, but do not allocate */
- SCCTRL3, /* Cacheable and bufferable, but do not allocate */
- SINVALID1,
- SINVALID2,
- SCCTRL6, /* Cacheable write-through, allocate on reads only */
- SCCTRL7, /* Cacheable write-back, allocate on reads only */
-};
-
-enum pl330_dstcachectrl {
- DCCTRL0 = 0, /* Noncacheable and nonbufferable */
- DCCTRL1, /* Bufferable only */
- DCCTRL2, /* Cacheable, but do not allocate */
- DCCTRL3, /* Cacheable and bufferable, but do not allocate */
- DINVALID1, /* AWCACHE = 0x1000 */
- DINVALID2,
- DCCTRL6, /* Cacheable write-through, allocate on writes only */
- DCCTRL7, /* Cacheable write-back, allocate on writes only */
-};
-
-/* Populated by the PL330 core driver for DMA API driver's info */
-struct pl330_config {
- u32 periph_id;
- u32 pcell_id;
-#define DMAC_MODE_NS (1 << 0)
- unsigned int mode;
- unsigned int data_bus_width:10; /* In number of bits */
- unsigned int data_buf_dep:10;
- unsigned int num_chan:4;
- unsigned int num_peri:6;
- u32 peri_ns;
- unsigned int num_events:6;
- u32 irq_ns;
-};
-
-/* Handle to the DMAC provided to the PL330 core */
-struct pl330_info {
- /* Owning device */
- struct device *dev;
- /* Size of MicroCode buffers for each channel. */
- unsigned mcbufsz;
- /* ioremap'ed address of PL330 registers. */
- void __iomem *base;
- /* Client can freely use it. */
- void *client_data;
- /* PL330 core data, Client must not touch it. */
- void *pl330_data;
- /* Populated by the PL330 core driver during pl330_add */
- struct pl330_config pcfg;
- /*
- * If the DMAC has some reset mechanism, then the
- * client may want to provide pointer to the method.
- */
- void (*dmac_reset)(struct pl330_info *pi);
-};
-
-enum pl330_byteswap {
- SWAP_NO = 0,
- SWAP_2,
- SWAP_4,
- SWAP_8,
- SWAP_16,
-};
-
-/**
- * Request Configuration.
- * The PL330 core does not modify this and uses the last
- * working configuration if the request doesn't provide any.
- *
- * The Client may want to provide this info only for the
- * first request and a request with new settings.
- */
-struct pl330_reqcfg {
- /* Address Incrementing */
- unsigned dst_inc:1;
- unsigned src_inc:1;
-
- /*
- * For now, the SRC & DST protection levels
- * and burst size/length are assumed same.
- */
- bool nonsecure;
- bool privileged;
- bool insnaccess;
- unsigned brst_len:5;
- unsigned brst_size:3; /* in power of 2 */
-
- enum pl330_dstcachectrl dcctl;
- enum pl330_srccachectrl scctl;
- enum pl330_byteswap swap;
-};
-
-/*
- * One cycle of DMAC operation.
- * There may be more than one xfer in a request.
- */
-struct pl330_xfer {
- u32 src_addr;
- u32 dst_addr;
- /* Size to xfer */
- u32 bytes;
- /*
- * Pointer to next xfer in the list.
- * The last xfer in the req must point to NULL.
- */
- struct pl330_xfer *next;
-};
-
-/* The xfer callbacks are made with one of these arguments. */
-enum pl330_op_err {
- /* All xfers in the request completed successfully. */
- PL330_ERR_NONE,
- /* If req aborted due to global error. */
- PL330_ERR_ABORT,
- /* If req failed due to problem with Channel. */
- PL330_ERR_FAIL,
-};
-
-enum pl330_reqtype {
- MEMTOMEM,
- MEMTODEV,
- DEVTOMEM,
- DEVTODEV,
-};
-
-/* A request defining Scatter-Gather List ending with NULL xfer. */
-struct pl330_req {
- enum pl330_reqtype rqtype;
- /* Index of peripheral for the xfer. */
- unsigned peri:5;
- /* Unique token for this xfer, set by the client. */
- void *token;
- /* Callback to be called after xfer. */
- void (*xfer_cb)(void *token, enum pl330_op_err err);
- /* If NULL, req will be done at last set parameters. */
- struct pl330_reqcfg *cfg;
- /* Pointer to first xfer in the request. */
- struct pl330_xfer *x;
-};
-
-/*
- * To know the status of the channel and DMAC, the client
- * provides a pointer to this structure. The PL330 core
- * fills it with current information.
- */
-struct pl330_chanstatus {
- /*
- * If the DMAC engine halted due to some error,
- * the client should remove-add DMAC.
- */
- bool dmac_halted;
- /*
- * If channel is halted due to some error,
- * the client should ABORT/FLUSH and START the channel.
- */
- bool faulting;
- /* Location of last load */
- u32 src_addr;
- /* Location of last store */
- u32 dst_addr;
- /*
- * Pointer to the currently active req, NULL if channel is
- * inactive, even though the requests may be present.
- */
- struct pl330_req *top_req;
- /* Pointer to req waiting second in the queue if any. */
- struct pl330_req *wait_req;
-};
-
-enum pl330_chan_op {
- /* Start the channel */
- PL330_OP_START,
- /* Abort the active xfer */
- PL330_OP_ABORT,
- /* Stop xfer and flush queue */
- PL330_OP_FLUSH,
-};
-
-extern int pl330_add(struct pl330_info *);
-extern void pl330_del(struct pl330_info *pi);
-extern int pl330_update(const struct pl330_info *pi);
-extern void pl330_release_channel(void *ch_id);
-extern void *pl330_request_channel(const struct pl330_info *pi);
-extern int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus);
-extern int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op);
-extern int pl330_submit_req(void *ch_id, struct pl330_req *r);
-
-#endif /* __PL330_CORE_H */
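
An aside, not part of the patch: the client-side lifecycle this header used to expose, sketched end to end under hypothetical names. Error paths are trimmed, and in a real user the DMAC interrupt would call pl330_update() as sketched earlier.

#include <linux/device.h>
#include <linux/errno.h>
#include <asm/hardware/pl330.h>

static int old_pl330_client(struct device *dev, void __iomem *regs)
{
        static struct pl330_info pi;
        void *ch_id;
        int ret;

        pi.dev     = dev;
        pi.base    = regs;
        pi.mcbufsz = 0;                 /* 0 lets the core pick a default */

        ret = pl330_add(&pi);           /* reset (if provided) + read config */
        if (ret)
                return ret;

        ch_id = pl330_request_channel(&pi);
        if (!ch_id) {
                pl330_del(&pi);
                return -EBUSY;
        }

        /* ... build pl330_req chains and pl330_submit_req() as sketched above ... */

        pl330_release_channel(ch_id);
        pl330_del(&pi);
        return 0;
}
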
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index bae7eb6011d2..df0ac0bb39aa 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -82,6 +82,11 @@ extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, uns
extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
+extern void __arm_iounmap(volatile void __iomem *addr);
+
+extern void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+ unsigned int, void *);
+extern void (*arch_iounmap)(volatile void __iomem *);
/*
* Bad read/write accesses...
@@ -96,6 +101,8 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
return (void __iomem *)addr;
}
+#define IOMEM(x) ((void __force __iomem *)(x))
+
/* IO barriers */
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#include <asm/barrier.h>
@@ -109,7 +116,11 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
/*
* Now, pick up the machine-defined IO definitions
*/
+#ifdef CONFIG_NEED_MACH_IO_H
#include <mach/io.h>
+#else
+#define __io(a) ({ (void)(a); __typesafe_io(0); })
+#endif
/*
* This is the limit of PC card/PCI/ISA IO space, which is by default
@@ -211,18 +222,18 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 * Again, these are defined to perform little endian accesses. See the
* IO port primitives for more information.
*/
-#ifdef __mem_pci
-#define readb_relaxed(c) ({ u8 __r = __raw_readb(__mem_pci(c)); __r; })
+#ifndef readl
+#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
- __raw_readw(__mem_pci(c))); __r; })
+ __raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
- __raw_readl(__mem_pci(c))); __r; })
+ __raw_readl(c)); __r; })
-#define writeb_relaxed(v,c) ((void)__raw_writeb(v,__mem_pci(c)))
+#define writeb_relaxed(v,c) ((void)__raw_writeb(v,c))
#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \
- cpu_to_le16(v),__mem_pci(c)))
+ cpu_to_le16(v),c))
#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
- cpu_to_le32(v),__mem_pci(c)))
+ cpu_to_le32(v),c))
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
@@ -232,30 +243,19 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
-#define readsb(p,d,l) __raw_readsb(__mem_pci(p),d,l)
-#define readsw(p,d,l) __raw_readsw(__mem_pci(p),d,l)
-#define readsl(p,d,l) __raw_readsl(__mem_pci(p),d,l)
-
-#define writesb(p,d,l) __raw_writesb(__mem_pci(p),d,l)
-#define writesw(p,d,l) __raw_writesw(__mem_pci(p),d,l)
-#define writesl(p,d,l) __raw_writesl(__mem_pci(p),d,l)
+#define readsb(p,d,l) __raw_readsb(p,d,l)
+#define readsw(p,d,l) __raw_readsw(p,d,l)
+#define readsl(p,d,l) __raw_readsl(p,d,l)
-#define memset_io(c,v,l) _memset_io(__mem_pci(c),(v),(l))
-#define memcpy_fromio(a,c,l) _memcpy_fromio((a),__mem_pci(c),(l))
-#define memcpy_toio(c,a,l) _memcpy_toio(__mem_pci(c),(a),(l))
+#define writesb(p,d,l) __raw_writesb(p,d,l)
+#define writesw(p,d,l) __raw_writesw(p,d,l)
+#define writesl(p,d,l) __raw_writesl(p,d,l)
-#elif !defined(readb)
+#define memset_io(c,v,l) _memset_io(c,(v),(l))
+#define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l))
+#define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l))
-#define readb(c) (__readwrite_bug("readb"),0)
-#define readw(c) (__readwrite_bug("readw"),0)
-#define readl(c) (__readwrite_bug("readl"),0)
-#define writeb(v,c) __readwrite_bug("writeb")
-#define writew(v,c) __readwrite_bug("writew")
-#define writel(v,c) __readwrite_bug("writel")
-
-#define check_signature(io,sig,len) (0)
-
-#endif /* __mem_pci */
+#endif /* readl */
/*
* ioremap and friends.
@@ -264,16 +264,11 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
* Documentation/io-mapping.txt.
*
*/
-#ifndef __arch_ioremap
-#define __arch_ioremap __arm_ioremap
-#define __arch_iounmap __iounmap
-#endif
-
-#define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
-#define ioremap_wc(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_WC)
-#define iounmap __arch_iounmap
+#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
+#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
+#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
+#define iounmap __arm_iounmap
/*
* io{read,write}{8,16,32} macros
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 5a526afb5f18..35c21c375d81 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -1,14 +1,18 @@
#ifndef __ASM_ARM_IRQ_H
#define __ASM_ARM_IRQ_H
+#define NR_IRQS_LEGACY 16
+
+#ifndef CONFIG_SPARSE_IRQ
#include <mach/irqs.h>
+#else
+#define NR_IRQS NR_IRQS_LEGACY
+#endif
#ifndef irq_canonicalize
#define irq_canonicalize(i) (i)
#endif
-#define NR_IRQS_LEGACY 16
-
/*
* Use this value to indicate lack of interrupt
* capability
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
new file mode 100644
index 000000000000..5c5ca2ea62b0
--- /dev/null
+++ b/arch/arm/include/asm/jump_label.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_ARM_JUMP_LABEL_H
+#define _ASM_ARM_JUMP_LABEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/system.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define JUMP_LABEL_NOP "nop.w"
+#else
+#define JUMP_LABEL_NOP "nop"
+#endif
+
+static __always_inline bool arch_static_branch(struct jump_label_key *key)
+{
+ asm goto("1:\n\t"
+ JUMP_LABEL_NOP "\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i" (key) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#endif /* __KERNEL__ */
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif
diff --git a/arch/arm/include/asm/mc146818rtc.h b/arch/arm/include/asm/mc146818rtc.h
index 6b884d2b0b69..e8567bb99dfc 100644
--- a/arch/arm/include/asm/mc146818rtc.h
+++ b/arch/arm/include/asm/mc146818rtc.h
@@ -5,7 +5,9 @@
#define _ASM_MC146818RTC_H
#include <linux/io.h>
-#include <mach/irqs.h>
+#include <linux/kernel.h>
+
+#define RTC_IRQ BUILD_BUG_ON(1)
#ifndef RTC_PORT
#define RTC_PORT(x) (0x70 + (x))
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index a8997d71084e..fcb575747e5e 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -116,6 +116,8 @@
#define MODULES_END (END_MEM)
#define MODULES_VADDR (PHYS_OFFSET)
+#define XIP_VIRT_ADDR(physaddr) (physaddr)
+
#endif /* !CONFIG_MMU */
/*
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 71605d9f8e42..a0b3cac0547c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -18,6 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
+#include <asm-generic/mm_hooks.h>
void __check_kvm_seq(struct mm_struct *mm);
@@ -133,32 +134,4 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)
-/*
- * We are inserting a "fake" vma for the user-accessible vector page so
- * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
- * But we also want to remove it before the generic code gets to see it
- * during process exit or the unmapping of it would cause total havoc.
- * (the macro is used as remove_vma() is static to mm/mmap.c)
- */
-#define arch_exit_mmap(mm) \
-do { \
- struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
- if (high_vma) { \
- BUG_ON(high_vma->vm_next); /* it should be last */ \
- if (high_vma->vm_prev) \
- high_vma->vm_prev->vm_next = NULL; \
- else \
- mm->mmap = NULL; \
- rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
- mm->mmap_cache = NULL; \
- mm->map_count--; \
- remove_vma(high_vma); \
- } \
-} while (0)
-
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
- struct mm_struct *mm)
-{
-}
-
#endif
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index c0efdd60966f..19c48deda70f 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -17,4 +17,63 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
#define ARM_OPCODE_CONDTEST_PASS 1
#define ARM_OPCODE_CONDTEST_UNCOND 2
+
+/*
+ * Opcode byteswap helpers
+ *
+ * These macros help with converting instructions between a canonical integer
+ * format and in-memory representation, in an endianness-agnostic manner.
+ *
+ * __mem_to_opcode_*() convert from in-memory representation to canonical form.
+ * __opcode_to_mem_*() convert from canonical form to in-memory representation.
+ *
+ *
+ * Canonical instruction representation:
+ *
+ * ARM: 0xKKLLMMNN
+ * Thumb 16-bit: 0x0000KKLL, where KK < 0xE8
+ * Thumb 32-bit: 0xKKLLMMNN, where KK >= 0xE8
+ *
+ * There is no way to distinguish an ARM instruction in canonical representation
+ * from a Thumb instruction (just as these cannot be distinguished in memory).
+ * Where this distinction is important, it needs to be tracked separately.
+ *
+ * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not
+ * represent any valid Thumb-2 instruction. For this range,
+ * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false.
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/swab.h>
+
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define __opcode_to_mem_arm(x) swab32(x)
+#define __opcode_to_mem_thumb16(x) swab16(x)
+#define __opcode_to_mem_thumb32(x) swahb32(x)
+#else
+#define __opcode_to_mem_arm(x) ((u32)(x))
+#define __opcode_to_mem_thumb16(x) ((u16)(x))
+#define __opcode_to_mem_thumb32(x) swahw32(x)
+#endif
+
+#define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x)
+#define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x)
+#define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x)
+
+/* Operations specific to Thumb opcodes */
+
+/* Instruction size checks: */
+#define __opcode_is_thumb32(x) ((u32)(x) >= 0xE8000000UL)
+#define __opcode_is_thumb16(x) ((u32)(x) < 0xE800UL)
+
+/* Operations to construct or split 32-bit Thumb instructions: */
+#define __opcode_thumb32_first(x) ((u16)((x) >> 16))
+#define __opcode_thumb32_second(x) ((u16)(x))
+#define __opcode_thumb32_compose(first, second) \
+ (((u32)(u16)(first) << 16) | (u32)(u16)(second))
+
+#endif /* __ASSEMBLY__ */
+
#endif /* __ASM_ARM_OPCODES_H */
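
An aside, not part of the patch: a minimal sketch of the new opcode helpers in use, composing the 32-bit Thumb "pop.w {lr}" encoding that the ftrace change below adopts and converting it to and from the in-memory form. The function name is hypothetical, and a real user would also synchronize the instruction cache after writing.

#include <linux/types.h>
#include <asm/opcodes.h>

static u32 patch_thumb32_nop(void *addr)
{
        /* Canonical form: 0xf85deb04 is "pop.w {lr}", the new ftrace NOP */
        u32 insn = __opcode_thumb32_compose(0xf85d, 0xeb04);

        /* Canonical form -> what is actually stored in kernel text */
        if (__opcode_is_thumb32(insn))
                *(u32 *)addr = __opcode_to_mem_thumb32(insn);

        /* Reading back: in-memory form -> canonical form (no-op on LE) */
        return __mem_to_opcode_thumb32(*(u32 *)addr);
}
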
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 97b440c25c58..5838361c48b3 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -151,6 +151,8 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
+#define __HAVE_ARCH_GATE_AREA 1
+
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level-types.h>
#else
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 7523340afb8a..00cbe10a50e3 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -22,6 +22,7 @@ enum arm_perf_pmu_ids {
ARM_PERF_PMU_ID_CA9,
ARM_PERF_PMU_ID_CA5,
ARM_PERF_PMU_ID_CA15,
+ ARM_PERF_PMU_ID_CA7,
ARM_NUM_PMU_IDS,
};
diff --git a/arch/arm/include/asm/posix_types.h b/arch/arm/include/asm/posix_types.h
index 2446d23bfdbf..efdf99045d87 100644
--- a/arch/arm/include/asm/posix_types.h
+++ b/arch/arm/include/asm/posix_types.h
@@ -19,59 +19,22 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
typedef unsigned short __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
typedef unsigned short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef int __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
+#define __kernel_uid_t __kernel_uid_t
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__)
-
-#undef __FD_SET
-#define __FD_SET(fd, fdsetp) \
- (((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] |= (1<<((fd) & 31)))
-
-#undef __FD_CLR
-#define __FD_CLR(fd, fdsetp) \
- (((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] &= ~(1<<((fd) & 31)))
-
-#undef __FD_ISSET
-#define __FD_ISSET(fd, fdsetp) \
- ((((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] & (1<<((fd) & 31))) != 0)
-
-#undef __FD_ZERO
-#define __FD_ZERO(fdsetp) \
- (memset (fdsetp, 0, sizeof (*(fd_set *)(fdsetp))))
-
-#endif
+#include <asm-generic/posix_types.h>
#endif
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index f4d7f56ee51f..5ac8d3d3e025 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -55,7 +55,6 @@ struct thread_struct {
#define start_thread(regs,pc,sp) \
({ \
unsigned long *stack = (unsigned long *)sp; \
- set_fs(USER_DS); \
memset(regs->uregs, 0, sizeof(regs->uregs)); \
if (current->personality & ADDR_LIMIT_32BIT) \
regs->ARM_cpsr = USR_MODE; \
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index ee0363307918..aeae9c609df4 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -13,8 +13,6 @@
#ifdef CONFIG_OF
-#include <asm/irq.h>
-
extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
extern void arm_dt_memblock_reserve(void);
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 02b2f8203982..85fe61e73202 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -318,6 +318,21 @@ extern struct cpu_tlb_fns cpu_tlb;
#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
+#define __tlb_op(f, insnarg, arg) \
+ do { \
+ if (always_tlb_flags & (f)) \
+ asm("mcr " insnarg \
+ : : "r" (arg) : "cc"); \
+ else if (possible_tlb_flags & (f)) \
+ asm("tst %1, %2\n\t" \
+ "mcrne " insnarg \
+ : : "r" (arg), "r" (__tlb_flag), "Ir" (f) \
+ : "cc"); \
+ } while (0)
+
+#define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg)
+#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)
+
static inline void local_flush_tlb_all(void)
{
const int zero = 0;
@@ -326,16 +341,11 @@ static inline void local_flush_tlb_all(void)
if (tlb_flag(TLB_WB))
dsb();
- if (tlb_flag(TLB_V3_FULL))
- asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
- if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
- asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
- if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
- asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
- if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
- asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
- if (tlb_flag(TLB_V7_UIS_FULL))
- asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
+ tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
+ tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
+ tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
+ tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
+ tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
if (tlb_flag(TLB_BARRIER)) {
dsb();
@@ -352,29 +362,23 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
if (tlb_flag(TLB_WB))
dsb();
- if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
- if (tlb_flag(TLB_V3_FULL))
- asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
- if (tlb_flag(TLB_V4_U_FULL))
- asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
- if (tlb_flag(TLB_V4_D_FULL))
- asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
- if (tlb_flag(TLB_V4_I_FULL))
- asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+ if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+ if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
+ tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
+ tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
+ tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
+ tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
+ }
+ put_cpu();
}
- put_cpu();
-
- if (tlb_flag(TLB_V6_U_ASID))
- asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc");
- if (tlb_flag(TLB_V6_D_ASID))
- asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
- if (tlb_flag(TLB_V6_I_ASID))
- asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
- if (tlb_flag(TLB_V7_UIS_ASID))
+
+ tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
+ tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
+ tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
#ifdef CONFIG_ARM_ERRATA_720789
- asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
+ tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero);
#else
- asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
+ tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid);
#endif
if (tlb_flag(TLB_BARRIER))
@@ -392,30 +396,23 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
if (tlb_flag(TLB_WB))
dsb();
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
- if (tlb_flag(TLB_V3_PAGE))
- asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
- if (tlb_flag(TLB_V4_U_PAGE))
- asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
- if (tlb_flag(TLB_V4_D_PAGE))
- asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
- if (tlb_flag(TLB_V4_I_PAGE))
- asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
+ if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+ cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+ tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
+ tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
+ tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
+ tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
}
- if (tlb_flag(TLB_V6_U_PAGE))
- asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
- if (tlb_flag(TLB_V6_D_PAGE))
- asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
- if (tlb_flag(TLB_V6_I_PAGE))
- asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
- if (tlb_flag(TLB_V7_UIS_PAGE))
+ tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
+ tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
+ tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
#ifdef CONFIG_ARM_ERRATA_720789
- asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc");
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
#else
- asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
#endif
if (tlb_flag(TLB_BARRIER))
@@ -432,25 +429,17 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
if (tlb_flag(TLB_WB))
dsb();
- if (tlb_flag(TLB_V3_PAGE))
- asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
- if (tlb_flag(TLB_V4_U_PAGE))
- asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
- if (tlb_flag(TLB_V4_D_PAGE))
- asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
- if (tlb_flag(TLB_V4_I_PAGE))
- asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
+ tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
+ tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
+ tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
+ tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
- if (tlb_flag(TLB_V6_U_PAGE))
- asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
- if (tlb_flag(TLB_V6_D_PAGE))
- asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
- if (tlb_flag(TLB_V6_I_PAGE))
- asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
- if (tlb_flag(TLB_V7_UIS_PAGE))
- asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");
+ tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
+ tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
+ tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
if (tlb_flag(TLB_BARRIER)) {
dsb();
@@ -475,13 +464,8 @@ static inline void flush_pmd_entry(void *pmd)
{
const unsigned int __tlb_flag = __cpu_tlb_flags;
- if (tlb_flag(TLB_DCLEAN))
- asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd"
- : : "r" (pmd) : "cc");
-
- if (tlb_flag(TLB_L2CLEAN_FR))
- asm("mcr p15, 1, %0, c15, c9, 1 @ L2 flush_pmd"
- : : "r" (pmd) : "cc");
+ tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
+ tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
if (tlb_flag(TLB_WB))
dsb();
@@ -491,15 +475,11 @@ static inline void clean_pmd_entry(void *pmd)
{
const unsigned int __tlb_flag = __cpu_tlb_flags;
- if (tlb_flag(TLB_DCLEAN))
- asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd"
- : : "r" (pmd) : "cc");
-
- if (tlb_flag(TLB_L2CLEAN_FR))
- asm("mcr p15, 1, %0, c15, c9, 1 @ L2 flush_pmd"
- : : "r" (pmd) : "cc");
+ tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
+ tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
}
+#undef tlb_op
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 5b29a6673625..f555bb3664dc 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -46,7 +46,7 @@ static inline int in_exception_text(unsigned long ptr)
return in ? : __in_irqentry_text(ptr);
}
-extern void __init early_trap_init(void);
+extern void __init early_trap_init(void *);
extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 3a274878412e..7b787d642af4 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -7,6 +7,8 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_insn.o = -pg
+CFLAGS_REMOVE_patch.o = -pg
endif
CFLAGS_REMOVE_return_address.o = -pg
@@ -14,14 +16,14 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
- process.o ptrace.o return_address.o setup.o signal.o \
- sys_arm.o stacktrace.o time.o traps.o
+ process.o ptrace.o return_address.o sched_clock.o \
+ setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o
obj-$(CONFIG_LEDS) += leds.o
obj-$(CONFIG_OC_ETM) += etm.o
-
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
obj-$(CONFIG_MODULES) += armksyms.o module.o
@@ -29,14 +31,14 @@ obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
-obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
-obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
-obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o
+obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o patch.o
ifdef CONFIG_THUMB2_KERNEL
obj-$(CONFIG_KPROBES) += kprobes-thumb.o
else
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
new file mode 100644
index 000000000000..89545f6c8403
--- /dev/null
+++ b/arch/arm/kernel/cpuidle.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/cpuidle.h>
+#include <asm/proc-fns.h>
+
+int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ cpu_do_idle();
+
+ return index;
+}
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 204e2160cfcc..c45522c36787 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -10,6 +10,7 @@
* 32-bit debugging code
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
.text
@@ -100,7 +101,7 @@
#endif /* CONFIG_CPU_V6 */
-#else
+#elif !defined(CONFIG_DEBUG_SEMIHOSTING)
#include <mach/debug-macro.S>
#endif /* CONFIG_DEBUG_ICEDCC */
@@ -155,6 +156,8 @@ hexbuf: .space 16
.ltorg
+#ifndef CONFIG_DEBUG_SEMIHOSTING
+
ENTRY(printascii)
addruart_current r3, r1, r2
b 2f
@@ -177,3 +180,24 @@ ENTRY(printch)
mov r0, #0
b 1b
ENDPROC(printch)
+
+#else
+
+ENTRY(printascii)
+ mov r1, r0
+ mov r0, #0x04 @ SYS_WRITE0
+ ARM( svc #0x123456 )
+ THUMB( svc #0xab )
+ mov pc, lr
+ENDPROC(printascii)
+
+ENTRY(printch)
+ adr r1, hexbuf
+ strb r0, [r1]
+ mov r0, #0x03 @ SYS_WRITEC
+ ARM( svc #0x123456 )
+ THUMB( svc #0xab )
+ mov pc, lr
+ENDPROC(printch)
+
+#endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 8ec5eed55e37..7fd3ad048da9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -15,6 +15,7 @@
* that causes it to save wrong values... Be aware!
*/
+#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index c0062ad1e847..df0bf0c8cb79 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -16,10 +16,13 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
+#include <asm/opcodes.h>
#include <asm/ftrace.h>
+#include "insn.h"
+
#ifdef CONFIG_THUMB2_KERNEL
-#define NOP 0xeb04f85d /* pop.w {lr} */
+#define NOP 0xf85deb04 /* pop.w {lr} */
#else
#define NOP 0xe8bd4000 /* pop {lr} */
#endif
@@ -60,76 +63,31 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
}
#endif
-#ifdef CONFIG_THUMB2_KERNEL
-static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
- bool link)
-{
- unsigned long s, j1, j2, i1, i2, imm10, imm11;
- unsigned long first, second;
- long offset;
-
- offset = (long)addr - (long)(pc + 4);
- if (offset < -16777216 || offset > 16777214) {
- WARN_ON_ONCE(1);
- return 0;
- }
-
- s = (offset >> 24) & 0x1;
- i1 = (offset >> 23) & 0x1;
- i2 = (offset >> 22) & 0x1;
- imm10 = (offset >> 12) & 0x3ff;
- imm11 = (offset >> 1) & 0x7ff;
-
- j1 = (!i1) ^ s;
- j2 = (!i2) ^ s;
-
- first = 0xf000 | (s << 10) | imm10;
- second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
- if (link)
- second |= 1 << 14;
-
- return (second << 16) | first;
-}
-#else
-static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
- bool link)
-{
- unsigned long opcode = 0xea000000;
- long offset;
-
- if (link)
- opcode |= 1 << 24;
-
- offset = (long)addr - (long)(pc + 8);
- if (unlikely(offset < -33554432 || offset > 33554428)) {
- /* Can't generate branches that far (from ARM ARM). Ftrace
- * doesn't generate branches outside of kernel text.
- */
- WARN_ON_ONCE(1);
- return 0;
- }
-
- offset = (offset >> 2) & 0x00ffffff;
-
- return opcode | offset;
-}
-#endif
-
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
- return ftrace_gen_branch(pc, addr, true);
+ return arm_gen_branch_link(pc, addr);
}
static int ftrace_modify_code(unsigned long pc, unsigned long old,
- unsigned long new)
+ unsigned long new, bool validate)
{
unsigned long replaced;
- if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
- return -EFAULT;
+ if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+ old = __opcode_to_mem_thumb32(old);
+ new = __opcode_to_mem_thumb32(new);
+ } else {
+ old = __opcode_to_mem_arm(old);
+ new = __opcode_to_mem_arm(new);
+ }
- if (replaced != old)
- return -EINVAL;
+ if (validate) {
+ if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ if (replaced != old)
+ return -EINVAL;
+ }
if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
return -EPERM;
@@ -141,23 +99,21 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
int ftrace_update_ftrace_func(ftrace_func_t func)
{
- unsigned long pc, old;
+ unsigned long pc;
unsigned long new;
int ret;
pc = (unsigned long)&ftrace_call;
- memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(pc, (unsigned long)func);
- ret = ftrace_modify_code(pc, old, new);
+ ret = ftrace_modify_code(pc, 0, new, false);
#ifdef CONFIG_OLD_MCOUNT
if (!ret) {
pc = (unsigned long)&ftrace_call_old;
- memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(pc, (unsigned long)func);
- ret = ftrace_modify_code(pc, old, new);
+ ret = ftrace_modify_code(pc, 0, new, false);
}
#endif
@@ -172,7 +128,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
old = ftrace_nop_replace(rec);
new = ftrace_call_replace(ip, adjust_address(rec, addr));
- return ftrace_modify_code(rec->ip, old, new);
+ return ftrace_modify_code(rec->ip, old, new, true);
}
int ftrace_make_nop(struct module *mod,
@@ -185,7 +141,7 @@ int ftrace_make_nop(struct module *mod,
old = ftrace_call_replace(ip, adjust_address(rec, addr));
new = ftrace_nop_replace(rec);
- ret = ftrace_modify_code(ip, old, new);
+ ret = ftrace_modify_code(ip, old, new, true);
#ifdef CONFIG_OLD_MCOUNT
if (ret == -EINVAL && addr == MCOUNT_ADDR) {
@@ -193,7 +149,7 @@ int ftrace_make_nop(struct module *mod,
old = ftrace_call_replace(ip, adjust_address(rec, addr));
new = ftrace_nop_replace(rec);
- ret = ftrace_modify_code(ip, old, new);
+ ret = ftrace_modify_code(ip, old, new, true);
}
#endif
@@ -249,12 +205,12 @@ static int __ftrace_modify_caller(unsigned long *callsite,
{
unsigned long caller_fn = (unsigned long) func;
unsigned long pc = (unsigned long) callsite;
- unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
+ unsigned long branch = arm_gen_branch(pc, caller_fn);
unsigned long nop = 0xe1a00000; /* mov r0, r0 */
unsigned long old = enable ? nop : branch;
unsigned long new = enable ? branch : nop;
- return ftrace_modify_code(pc, old, new);
+ return ftrace_modify_code(pc, old, new, true);
}
static int ftrace_modify_graph_caller(bool enable)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index a2e9694a68ee..3bf0c7f8b043 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -265,7 +265,7 @@ __create_page_tables:
str r6, [r3]
#ifdef CONFIG_DEBUG_LL
-#ifndef CONFIG_DEBUG_ICEDCC
+#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
/*
* Map in IO space for serial debugging.
* This allows debug messages to be output
@@ -297,10 +297,10 @@ __create_page_tables:
cmp r0, r6
blo 1b
-#else /* CONFIG_DEBUG_ICEDCC */
- /* we don't need any serial debugging mappings for ICEDCC */
+#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
+ /* we don't need any serial debugging mappings */
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
-#endif /* !CONFIG_DEBUG_ICEDCC */
+#endif
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
/*
diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c
new file mode 100644
index 000000000000..ab312e516546
--- /dev/null
+++ b/arch/arm/kernel/insn.c
@@ -0,0 +1,61 @@
+#include <linux/kernel.h>
+#include <asm/opcodes.h>
+
+static unsigned long
+__arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
+{
+ unsigned long s, j1, j2, i1, i2, imm10, imm11;
+ unsigned long first, second;
+ long offset;
+
+ offset = (long)addr - (long)(pc + 4);
+ if (offset < -16777216 || offset > 16777214) {
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ s = (offset >> 24) & 0x1;
+ i1 = (offset >> 23) & 0x1;
+ i2 = (offset >> 22) & 0x1;
+ imm10 = (offset >> 12) & 0x3ff;
+ imm11 = (offset >> 1) & 0x7ff;
+
+ j1 = (!i1) ^ s;
+ j2 = (!i2) ^ s;
+
+ first = 0xf000 | (s << 10) | imm10;
+ second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
+ if (link)
+ second |= 1 << 14;
+
+ return __opcode_thumb32_compose(first, second);
+}
+
+static unsigned long
+__arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
+{
+ unsigned long opcode = 0xea000000;
+ long offset;
+
+ if (link)
+ opcode |= 1 << 24;
+
+ offset = (long)addr - (long)(pc + 8);
+ if (unlikely(offset < -33554432 || offset > 33554428)) {
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ offset = (offset >> 2) & 0x00ffffff;
+
+ return opcode | offset;
+}
+
+unsigned long
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link)
+{
+ if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
+ return __arm_gen_branch_thumb2(pc, addr, link);
+ else
+ return __arm_gen_branch_arm(pc, addr, link);
+}
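
As a worked illustration of the ARM encoding path above (addresses chosen arbitrarily):

/*
 * Example: a branch-and-link from pc = 0xc0008000 to addr = 0xc0100000.
 *
 *   offset = 0xc0100000 - (0xc0008000 + 8)    = 0x000f7ff8
 *   imm24  = (offset >> 2) & 0x00ffffff       = 0x0003dffe
 *   insn   = 0xea000000 | (1 << 24) | imm24   = 0xeb03dffe   ("bl 0xc0100000")
 *
 * Targets beyond the +/-32 MB range make the helper WARN and return 0.
 */
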
diff --git a/arch/arm/kernel/insn.h b/arch/arm/kernel/insn.h
new file mode 100644
index 000000000000..e96065da4dae
--- /dev/null
+++ b/arch/arm/kernel/insn.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_ARM_INSN_H
+#define __ASM_ARM_INSN_H
+
+static inline unsigned long
+arm_gen_nop(void)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+ return 0xf3af8000; /* nop.w */
+#else
+ return 0xe1a00000; /* mov r0, r0 */
+#endif
+}
+
+unsigned long
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link);
+
+static inline unsigned long
+arm_gen_branch(unsigned long pc, unsigned long addr)
+{
+ return __arm_gen_branch(pc, addr, false);
+}
+
+static inline unsigned long
+arm_gen_branch_link(unsigned long pc, unsigned long addr)
+{
+ return __arm_gen_branch(pc, addr, true);
+}
+
+#endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 6a6a097edd61..71ccdbfed662 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -180,10 +180,7 @@ void migrate_irqs(void)
local_irq_save(flags);
for_each_irq_desc(i, desc) {
- bool affinity_broken = false;
-
- if (!desc)
- continue;
+ bool affinity_broken;
raw_spin_lock(&desc->lock);
affinity_broken = migrate_one_irq(desc);
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
new file mode 100644
index 000000000000..4ce4f789446d
--- /dev/null
+++ b/arch/arm/kernel/jump_label.c
@@ -0,0 +1,39 @@
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+
+#include "insn.h"
+#include "patch.h"
+
+#ifdef HAVE_JUMP_LABEL
+
+static void __arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ bool is_static)
+{
+ void *addr = (void *)entry->code;
+ unsigned int insn;
+
+ if (type == JUMP_LABEL_ENABLE)
+ insn = arm_gen_branch(entry->code, entry->target);
+ else
+ insn = arm_gen_nop();
+
+ if (is_static)
+ __patch_text(addr, insn);
+ else
+ patch_text(addr, insn);
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, false);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, true);
+}
+
+#endif
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 129c1163248b..ab1869dac97a 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -29,6 +29,7 @@
#include <asm/cacheflush.h>
#include "kprobes.h"
+#include "patch.h"
#define MIN_STACK_SIZE(addr) \
min((unsigned long)MAX_STACK_SIZE, \
@@ -103,57 +104,33 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return 0;
}
-#ifdef CONFIG_THUMB2_KERNEL
-
-/*
- * For a 32-bit Thumb breakpoint spanning two memory words we need to take
- * special precautions to insert the breakpoint atomically, especially on SMP
- * systems. This is achieved by calling this arming function using stop_machine.
- */
-static int __kprobes set_t32_breakpoint(void *addr)
-{
- ((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16;
- ((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff;
- flush_insns(addr, 2*sizeof(u16));
- return 0;
-}
-
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
- uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */
-
- if (!is_wide_instruction(p->opcode)) {
- *(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
- flush_insns(addr, sizeof(u16));
- } else if (addr & 2) {
- /* A 32-bit instruction spanning two words needs special care */
- stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map);
+ unsigned int brkp;
+ void *addr;
+
+ if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+ /* Remove any Thumb flag */
+ addr = (void *)((uintptr_t)p->addr & ~1);
+
+ if (is_wide_instruction(p->opcode))
+ brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
+ else
+ brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
} else {
- /* Word aligned 32-bit instruction can be written atomically */
- u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
-#ifndef __ARMEB__ /* Swap halfwords for little-endian */
- bkp = (bkp >> 16) | (bkp << 16);
-#endif
- *(u32 *)addr = bkp;
- flush_insns(addr, sizeof(u32));
- }
-}
+ kprobe_opcode_t insn = p->opcode;
-#else /* !CONFIG_THUMB2_KERNEL */
+ addr = p->addr;
+ brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
-void __kprobes arch_arm_kprobe(struct kprobe *p)
-{
- kprobe_opcode_t insn = p->opcode;
- kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
- if (insn >= 0xe0000000)
- brkp |= 0xe0000000; /* Unconditional instruction */
- else
- brkp |= insn & 0xf0000000; /* Copy condition from insn */
- *p->addr = brkp;
- flush_insns(p->addr, sizeof(p->addr[0]));
-}
+ if (insn >= 0xe0000000)
+ brkp |= 0xe0000000; /* Unconditional instruction */
+ else
+ brkp |= insn & 0xf0000000; /* Copy condition from insn */
+ }
-#endif /* !CONFIG_THUMB2_KERNEL */
+ patch_text(addr, brkp);
+}
/*
* The actual disarming is done here on each CPU and synchronized using
@@ -166,25 +143,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
int __kprobes __arch_disarm_kprobe(void *p)
{
struct kprobe *kp = p;
-#ifdef CONFIG_THUMB2_KERNEL
- u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1);
- kprobe_opcode_t insn = kp->opcode;
- unsigned int len;
+ void *addr = (void *)((uintptr_t)kp->addr & ~1);
- if (is_wide_instruction(insn)) {
- ((u16 *)addr)[0] = insn>>16;
- ((u16 *)addr)[1] = insn;
- len = 2*sizeof(u16);
- } else {
- ((u16 *)addr)[0] = insn;
- len = sizeof(u16);
- }
- flush_insns(addr, len);
+ __patch_text(addr, kp->opcode);
-#else /* !CONFIG_THUMB2_KERNEL */
- *kp->addr = kp->opcode;
- flush_insns(kp->addr, sizeof(kp->addr[0]));
-#endif
return 0;
}
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 56995983eed8..dfcdb9f7c126 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
@@ -53,6 +54,29 @@ void machine_crash_nonpanic_core(void *unused)
cpu_relax();
}
+static void machine_kexec_mask_interrupts(void)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+
+ for_each_irq_desc(i, desc) {
+ struct irq_chip *chip;
+
+ chip = irq_desc_get_chip(desc);
+ if (!chip)
+ continue;
+
+ if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
+ chip->irq_eoi(&desc->irq_data);
+
+ if (chip->irq_mask)
+ chip->irq_mask(&desc->irq_data);
+
+ if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
+ chip->irq_disable(&desc->irq_data);
+ }
+}
+
void machine_crash_shutdown(struct pt_regs *regs)
{
unsigned long msecs;
@@ -70,6 +94,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n");
crash_save_cpu(regs, smp_processor_id());
+ machine_kexec_mask_interrupts();
printk(KERN_INFO "Loading crashdump kernel...\n");
}
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
new file mode 100644
index 000000000000..07314af47733
--- /dev/null
+++ b/arch/arm/kernel/patch.c
@@ -0,0 +1,75 @@
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/stop_machine.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+#include <asm/opcodes.h>
+
+#include "patch.h"
+
+struct patch {
+ void *addr;
+ unsigned int insn;
+};
+
+void __kprobes __patch_text(void *addr, unsigned int insn)
+{
+ bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
+ int size;
+
+ if (thumb2 && __opcode_is_thumb16(insn)) {
+ *(u16 *)addr = __opcode_to_mem_thumb16(insn);
+ size = sizeof(u16);
+ } else if (thumb2 && ((uintptr_t)addr & 2)) {
+ u16 first = __opcode_thumb32_first(insn);
+ u16 second = __opcode_thumb32_second(insn);
+ u16 *addrh = addr;
+
+ addrh[0] = __opcode_to_mem_thumb16(first);
+ addrh[1] = __opcode_to_mem_thumb16(second);
+
+ size = sizeof(u32);
+ } else {
+ if (thumb2)
+ insn = __opcode_to_mem_thumb32(insn);
+ else
+ insn = __opcode_to_mem_arm(insn);
+
+ *(u32 *)addr = insn;
+ size = sizeof(u32);
+ }
+
+ flush_icache_range((uintptr_t)(addr),
+ (uintptr_t)(addr) + size);
+}
+
+static int __kprobes patch_text_stop_machine(void *data)
+{
+ struct patch *patch = data;
+
+ __patch_text(patch->addr, patch->insn);
+
+ return 0;
+}
+
+void __kprobes patch_text(void *addr, unsigned int insn)
+{
+ struct patch patch = {
+ .addr = addr,
+ .insn = insn,
+ };
+
+ if (cache_ops_need_broadcast()) {
+ stop_machine(patch_text_stop_machine, &patch, cpu_online_mask);
+ } else {
+ bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL)
+ && __opcode_is_thumb32(insn)
+ && ((uintptr_t)addr & 2);
+
+ if (straddles_word)
+ stop_machine(patch_text_stop_machine, &patch, NULL);
+ else
+ __patch_text(addr, insn);
+ }
+}
diff --git a/arch/arm/kernel/patch.h b/arch/arm/kernel/patch.h
new file mode 100644
index 000000000000..b4731f2dac38
--- /dev/null
+++ b/arch/arm/kernel/patch.h
@@ -0,0 +1,7 @@
+#ifndef _ARM_KERNEL_PATCH_H
+#define _ARM_KERNEL_PATCH_H
+
+void patch_text(void *addr, unsigned int insn);
+void __patch_text(void *addr, unsigned int insn);
+
+#endif
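
Taken together with the insn.h helpers, a hedged usage sketch of the new interface (the function name and addresses are illustrative; this mirrors what the jump-label code above does):

static void example_enable_branch(unsigned long site, unsigned long target)
{
	unsigned int insn = arm_gen_branch(site, target);

	if (insn)				/* 0 means the target was out of range */
		patch_text((void *)site, insn);	/* handles Thumb-2 straddling and SMP broadcast */
}
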
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8a89d3b7626b..186c8cb982c5 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -738,6 +738,9 @@ init_hw_perf_events(void)
case 0xC0F0: /* Cortex-A15 */
cpu_pmu = armv7_a15_pmu_init();
break;
+ case 0xC070: /* Cortex-A7 */
+ cpu_pmu = armv7_a7_pmu_init();
+ break;
}
/* Intel CPUs [xscale]. */
} else if (0x69 == implementor) {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4d7095af2ab3..00755d82e2f2 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -610,6 +610,130 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
};
/*
+ * Cortex-A7 HW events mapping
+ */
+static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ /*
+ * The performance counters don't differentiate between read
+ * and write accesses/misses so this isn't strictly correct,
+ * but it's the best we can do. Writes and reads get
+ * combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+/*
* Perf Events' indices
*/
#define ARMV7_IDX_CYCLE_COUNTER 0
@@ -1104,6 +1228,12 @@ static int armv7_a15_map_event(struct perf_event *event)
&armv7_a15_perf_cache_map, 0xFF);
}
+static int armv7_a7_map_event(struct perf_event *event)
+{
+ return map_cpu_event(event, &armv7_a7_perf_map,
+ &armv7_a7_perf_cache_map, 0xFF);
+}
+
static struct arm_pmu armv7pmu = {
.handle_irq = armv7pmu_handle_irq,
.enable = armv7pmu_enable_event,
@@ -1164,6 +1294,16 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
armv7pmu.set_event_filter = armv7pmu_set_event_filter;
return &armv7pmu;
}
+
+static struct arm_pmu *__init armv7_a7_pmu_init(void)
+{
+ armv7pmu.id = ARM_PERF_PMU_ID_CA7;
+ armv7pmu.name = "ARMv7 Cortex-A7";
+ armv7pmu.map_event = armv7_a7_map_event;
+ armv7pmu.num_events = armv7_read_num_pmnc_events();
+ armv7pmu.set_event_filter = armv7pmu_set_event_filter;
+ return &armv7pmu;
+}
#else
static struct arm_pmu *__init armv7_a8_pmu_init(void)
{
@@ -1184,4 +1324,9 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
{
return NULL;
}
+
+static struct arm_pmu *__init armv7_a7_pmu_init(void)
+{
+ return NULL;
+}
#endif /* CONFIG_CPU_V7 */
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 7b9cddef6e53..2b7b017a20cd 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -528,21 +528,39 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
#ifdef CONFIG_MMU
/*
* The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Let's declare a mapping
- * for it so it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers and the signal restart code. Insert it into the
+ * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
*/
+static struct vm_area_struct gate_vma;
-int vectors_user_mapping(void)
+static int __init gate_vma_init(void)
{
- struct mm_struct *mm = current->mm;
- return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
- VM_READ | VM_EXEC |
- VM_MAYREAD | VM_MAYEXEC | VM_RESERVED,
- NULL);
+ gate_vma.vm_start = 0xffff0000;
+ gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
+ gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
+ gate_vma.vm_flags = VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYEXEC;
+ return 0;
+}
+arch_initcall(gate_vma_init);
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+ return &gate_vma;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+ return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
+}
+
+int in_gate_area_no_mm(unsigned long addr)
+{
+ return in_gate_area(NULL, addr);
}
const char *arch_vma_name(struct vm_area_struct *vma)
{
- return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
+ return (vma == &gate_vma) ? "[vectors]" : NULL;
}
#endif
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 5416c7c12528..27d186abbc06 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -10,6 +10,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/syscore_ops.h>
#include <linux/timer.h>
#include <asm/sched_clock.h>
@@ -164,3 +165,20 @@ void __init sched_clock_postinit(void)
sched_clock_poll(sched_clock_timer.data);
}
+
+static int sched_clock_suspend(void)
+{
+ sched_clock_poll(sched_clock_timer.data);
+ return 0;
+}
+
+static struct syscore_ops sched_clock_ops = {
+ .suspend = sched_clock_suspend,
+};
+
+static int __init sched_clock_syscore_init(void)
+{
+ register_syscore_ops(&sched_clock_ops);
+ return 0;
+}
+device_initcall(sched_clock_syscore_init);
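
A hedged reading of the suspend hook above (the patch itself does not spell this out):

/*
 * Assumption: polling the clock source one last time before suspend updates
 * the cached epoch, so sched_clock() does not observe a large jump if the
 * underlying counter stops or wraps while the system is suspended.
 */
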
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 9e0fdb3a1988..b91411371ae1 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -976,7 +976,6 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &dummy_con;
#endif
#endif
- early_trap_init();
if (mdesc->init_early)
mdesc->init_early();
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 9e617bd4a146..7cb532fc8aa4 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -66,12 +66,13 @@ const unsigned long syscall_restart_code[2] = {
*/
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
{
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
+ sigset_t blocked;
+
current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+
+ mask &= _BLOCKABLE;
+ siginitset(&blocked, mask);
+ set_current_blocked(&blocked);
current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -280,10 +281,7 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
if (err == 0) {
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
}
__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
@@ -636,13 +634,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
/*
* Block the signal if we were successful.
*/
- spin_lock_irq(&tsk->sighand->siglock);
- sigorsets(&tsk->blocked, &tsk->blocked,
- &ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&tsk->blocked, sig);
- recalc_sigpending();
- spin_unlock_irq(&tsk->sighand->siglock);
+ block_sigmask(ka, sig);
return 0;
}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8f8cce2c46c4..2cee7d1eb958 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -58,6 +58,8 @@ enum ipi_msg_type {
IPI_CPU_STOP,
};
+static DECLARE_COMPLETION(cpu_running);
+
int __cpuinit __cpu_up(unsigned int cpu)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -98,20 +100,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
*/
ret = boot_secondary(cpu, idle);
if (ret == 0) {
- unsigned long timeout;
-
/*
* CPU was successfully started, wait for it
* to come online or time out.
*/
- timeout = jiffies + HZ;
- while (time_before(jiffies, timeout)) {
- if (cpu_online(cpu))
- break;
-
- udelay(10);
- barrier();
- }
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
if (!cpu_online(cpu)) {
pr_crit("CPU%u: failed to come online\n", cpu);
@@ -288,9 +282,10 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
/*
* OK, now it's safe to let the boot CPU continue. Wait for
* the CPU migration code to notice that the CPU is online
- * before we continue.
+ * before we continue - which happens after __cpu_up returns.
*/
set_cpu_online(cpu, true);
+ complete(&cpu_running);
/*
* Setup the percpu timer for this CPU.
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 8c57dd3680e9..fe31b22f18fd 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -25,8 +25,6 @@
#include <linux/timer.h>
#include <linux/irq.h>
-#include <linux/mc146818rtc.h>
-
#include <asm/leds.h>
#include <asm/thread_info.h>
#include <asm/sched_clock.h>
@@ -149,8 +147,6 @@ void __init time_init(void)
{
system_timer = machine_desc->timer;
system_timer->init();
-#ifdef CONFIG_HAVE_SCHED_CLOCK
sched_clock_postinit();
-#endif
}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cd77743472a2..778454750a6c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -227,6 +227,11 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
#else
#define S_SMP ""
#endif
+#ifdef CONFIG_THUMB2_KERNEL
+#define S_ISA " THUMB2"
+#else
+#define S_ISA " ARM"
+#endif
static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
{
@@ -234,8 +239,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
static int die_counter;
int ret;
- printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
- str, err, ++die_counter);
+ printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP
+ S_ISA "\n", str, err, ++die_counter);
/* trap and error numbers are mostly meaningless on ARM */
ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
@@ -784,18 +789,16 @@ static void __init kuser_get_tls_init(unsigned long vectors)
memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
}
-void __init early_trap_init(void)
+void __init early_trap_init(void *vectors_base)
{
-#if defined(CONFIG_CPU_USE_DOMAINS)
- unsigned long vectors = CONFIG_VECTORS_BASE;
-#else
- unsigned long vectors = (unsigned long)vectors_page;
-#endif
+ unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
extern char __kuser_helper_start[], __kuser_helper_end[];
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ vectors_page = vectors_base;
+
/*
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 4320b2096789..698479f1e197 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -437,7 +437,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
/* DMA slave channel configuration */
atslave->dma_dev = &at_hdmac_device.dev;
- atslave->reg_width = AT_DMA_SLAVE_WIDTH_32BIT;
atslave->cfg = ATC_FIFOCFG_HALFFIFO
| ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW;
atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16;
diff --git a/arch/arm/mach-at91/at91x40.c b/arch/arm/mach-at91/at91x40.c
index 5400a1d65035..d62fe090d814 100644
--- a/arch/arm/mach-at91/at91x40.c
+++ b/arch/arm/mach-at91/at91x40.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/proc-fns.h>
+#include <asm/system_misc.h>
#include <asm/mach/arch.h>
#include <mach/at91x40.h>
#include <mach/at91_st.h>
diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c
index 555d956b3a57..ece1f9aefb47 100644
--- a/arch/arm/mach-at91/cpuidle.c
+++ b/arch/arm/mach-at91/cpuidle.c
@@ -17,9 +17,10 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/cpuidle.h>
-#include <asm/proc-fns.h>
#include <linux/io.h>
#include <linux/export.h>
+#include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
#include "pm.h"
@@ -27,61 +28,39 @@
static DEFINE_PER_CPU(struct cpuidle_device, at91_cpuidle_device);
-static struct cpuidle_driver at91_idle_driver = {
- .name = "at91_idle",
- .owner = THIS_MODULE,
-};
-
/* Actual code that puts the SoC in different idle states */
static int at91_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- struct timeval before, after;
- int idle_time;
-
- local_irq_disable();
- do_gettimeofday(&before);
- if (index == 0)
- /* Wait for interrupt state */
- cpu_do_idle();
- else if (index == 1)
- at91_standby();
+ at91_standby();
- do_gettimeofday(&after);
- local_irq_enable();
- idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
- (after.tv_usec - before.tv_usec);
-
- dev->last_residency = idle_time;
return index;
}
+static struct cpuidle_driver at91_idle_driver = {
+ .name = "at91_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = at91_enter_idle,
+ .exit_latency = 10,
+ .target_residency = 100000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "RAM_SR",
+ .desc = "WFI and DDR Self Refresh",
+ },
+ .state_count = AT91_MAX_STATES,
+};
+
/* Initialize CPU idle by registering the idle states */
static int at91_init_cpuidle(void)
{
struct cpuidle_device *device;
- struct cpuidle_driver *driver = &at91_idle_driver;
device = &per_cpu(at91_cpuidle_device, smp_processor_id());
device->state_count = AT91_MAX_STATES;
- driver->state_count = AT91_MAX_STATES;
-
- /* Wait for interrupt state */
- driver->states[0].enter = at91_enter_idle;
- driver->states[0].exit_latency = 1;
- driver->states[0].target_residency = 10000;
- driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
- strcpy(driver->states[0].name, "WFI");
- strcpy(driver->states[0].desc, "Wait for interrupt");
-
- /* Wait for interrupt and RAM self refresh state */
- driver->states[1].enter = at91_enter_idle;
- driver->states[1].exit_latency = 10;
- driver->states[1].target_residency = 10000;
- driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
- strcpy(driver->states[1].name, "RAM_SR");
- strcpy(driver->states[1].desc, "WFI and RAM Self Refresh");
cpuidle_register_driver(&at91_idle_driver);
diff --git a/arch/arm/mach-at91/include/mach/at_hdmac.h b/arch/arm/mach-at91/include/mach/at_hdmac.h
index 187cb58345c0..fff48d1a0f4e 100644
--- a/arch/arm/mach-at91/include/mach/at_hdmac.h
+++ b/arch/arm/mach-at91/include/mach/at_hdmac.h
@@ -24,18 +24,6 @@ struct at_dma_platform_data {
};
/**
- * enum at_dma_slave_width - DMA slave register access width.
- * @AT_DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
- * @AT_DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
- * @AT_DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
- */
-enum at_dma_slave_width {
- AT_DMA_SLAVE_WIDTH_8BIT = 0,
- AT_DMA_SLAVE_WIDTH_16BIT,
- AT_DMA_SLAVE_WIDTH_32BIT,
-};
-
-/**
* struct at_dma_slave - Controller-specific information about a slave
* @dma_dev: required DMA master device
* @tx_reg: physical address of data register used for
@@ -48,9 +36,6 @@ enum at_dma_slave_width {
*/
struct at_dma_slave {
struct device *dma_dev;
- dma_addr_t tx_reg;
- dma_addr_t rx_reg;
- enum at_dma_slave_width reg_width;
u32 cfg;
u32 ctrla;
};
diff --git a/arch/arm/mach-at91/include/mach/io.h b/arch/arm/mach-at91/include/mach/io.h
deleted file mode 100644
index 4003001eca3d..000000000000
--- a/arch/arm/mach-at91/include/mach/io.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/io.h
- *
- * Copyright (C) 2003 SAN People
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ASM_ARCH_IO_H
-#define __ASM_ARCH_IO_H
-
-#include <mach/hardware.h>
-
-#define IO_SPACE_LIMIT 0xFFFFFFFF
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/uncompress.h b/arch/arm/mach-at91/include/mach/uncompress.h
index 0234fd9d20d6..4218647c1fcd 100644
--- a/arch/arm/mach-at91/include/mach/uncompress.h
+++ b/arch/arm/mach-at91/include/mach/uncompress.h
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <linux/atmel_serial.h>
+#include <mach/hardware.h>
#if defined(CONFIG_AT91_EARLY_DBGU0)
#define UART_OFFSET AT91_BASE_DBGU0
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index 1083739e3065..97cc04dc8073 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -11,6 +11,7 @@
#include <linux/pm.h>
#include <linux/of_address.h>
+#include <asm/system_misc.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-bcmring/include/mach/io.h b/arch/arm/mach-bcmring/include/mach/io.h
deleted file mode 100644
index dae5e9b166ea..000000000000
--- a/arch/arm/mach-bcmring/include/mach/io.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- *
- * Copyright (C) 1999 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#include <mach/hardware.h>
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
- */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-clps711x/edb7211-mm.c b/arch/arm/mach-clps711x/edb7211-mm.c
index 0bea1454ae03..4372f06c9929 100644
--- a/arch/arm/mach-clps711x/edb7211-mm.c
+++ b/arch/arm/mach-clps711x/edb7211-mm.c
@@ -21,6 +21,7 @@
*/
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/bug.h>
#include <mach/hardware.h>
#include <asm/page.h>
diff --git a/arch/arm/mach-clps711x/include/mach/io.h b/arch/arm/mach-clps711x/include/mach/io.h
deleted file mode 100644
index 2e0b3ced8f07..000000000000
--- a/arch/arm/mach-clps711x/include/mach/io.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * arch/arm/mach-clps711x/include/mach/io.h
- *
- * Copyright (C) 1999 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-/*
- * We don't support ins[lb]/outs[lb]. Make them fault.
- */
-#define __raw_readsb(p,d,l) do { *(int *)0 = 0; } while (0)
-#define __raw_readsl(p,d,l) do { *(int *)0 = 0; } while (0)
-#define __raw_writesb(p,d,l) do { *(int *)0 = 0; } while (0)
-#define __raw_writesl(p,d,l) do { *(int *)0 = 0; } while (0)
-
-#endif
diff --git a/arch/arm/mach-clps711x/include/mach/uncompress.h b/arch/arm/mach-clps711x/include/mach/uncompress.h
index 7164310dea7c..35ed731b9f16 100644
--- a/arch/arm/mach-clps711x/include/mach/uncompress.h
+++ b/arch/arm/mach-clps711x/include/mach/uncompress.h
@@ -17,7 +17,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <mach/io.h>
#include <mach/hardware.h>
#include <asm/hardware/clps7111.h>
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index 941a308e1253..031805b1428d 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -72,13 +72,13 @@ void __init cns3xxx_map_io(void)
/* used by entry-macro.S */
void __init cns3xxx_init_irq(void)
{
- gic_init(0, 29, __io(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
- __io(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT));
+ gic_init(0, 29, IOMEM(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
+ IOMEM(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT));
}
void cns3xxx_power_off(void)
{
- u32 __iomem *pm_base = __io(CNS3XXX_PM_BASE_VIRT);
+ u32 __iomem *pm_base = IOMEM(CNS3XXX_PM_BASE_VIRT);
u32 clkctrl;
printk(KERN_INFO "powering system down...\n");
@@ -237,7 +237,7 @@ static void __init __cns3xxx_timer_init(unsigned int timer_irq)
static void __init cns3xxx_timer_init(void)
{
- cns3xxx_tmr1 = __io(CNS3XXX_TIMER1_2_3_BASE_VIRT);
+ cns3xxx_tmr1 = IOMEM(CNS3XXX_TIMER1_2_3_BASE_VIRT);
__cns3xxx_timer_init(IRQ_CNS3XXX_TIMER0);
}
diff --git a/arch/arm/mach-cns3xxx/devices.c b/arch/arm/mach-cns3xxx/devices.c
index 79d1fb02c23f..1e40c99b015f 100644
--- a/arch/arm/mach-cns3xxx/devices.c
+++ b/arch/arm/mach-cns3xxx/devices.c
@@ -98,7 +98,7 @@ static struct platform_device cns3xxx_sdhci_pdev = {
void __init cns3xxx_sdhci_init(void)
{
- u32 __iomem *gpioa = __io(CNS3XXX_MISC_BASE_VIRT + 0x0014);
+ u32 __iomem *gpioa = IOMEM(CNS3XXX_MISC_BASE_VIRT + 0x0014);
u32 gpioa_pins = __raw_readl(gpioa);
/* MMC/SD pins share with GPIOA */
diff --git a/arch/arm/mach-cns3xxx/include/mach/io.h b/arch/arm/mach-cns3xxx/include/mach/io.h
deleted file mode 100644
index 33b6fc1ece7c..000000000000
--- a/arch/arm/mach-cns3xxx/include/mach/io.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright 2008 Cavium Networks
- * Copyright 2003 ARM Limited
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- */
-#ifndef __MACH_IO_H
-#define __MACH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index a30c7c5a6d83..9107691adbdb 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <linux/export.h>
#include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
#include <mach/cpuidle.h>
#include <mach/ddr2.h>
@@ -30,12 +31,43 @@ struct davinci_ops {
u32 flags;
};
+/* Actual code that puts the SoC in different idle states */
+static int davinci_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+ struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
+
+ if (ops && ops->enter)
+ ops->enter(ops->flags);
+
+ index = cpuidle_wrap_enter(dev, drv, index,
+ arm_cpuidle_simple_enter);
+
+ if (ops && ops->exit)
+ ops->exit(ops->flags);
+
+ return index;
+}
+
/* fields in davinci_ops.flags */
#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN BIT(0)
static struct cpuidle_driver davinci_idle_driver = {
- .name = "cpuidle-davinci",
- .owner = THIS_MODULE,
+ .name = "cpuidle-davinci",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = davinci_enter_idle,
+ .exit_latency = 10,
+ .target_residency = 100000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "DDR SR",
+ .desc = "WFI and DDR Self Refresh",
+ },
+ .state_count = DAVINCI_CPUIDLE_MAX_STATES,
};
static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
@@ -77,41 +109,10 @@ static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
},
};
-/* Actual code that puts the SoC in different idle states */
-static int davinci_enter_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
- struct timeval before, after;
- int idle_time;
-
- local_irq_disable();
- do_gettimeofday(&before);
-
- if (ops && ops->enter)
- ops->enter(ops->flags);
- /* Wait for interrupt state */
- cpu_do_idle();
- if (ops && ops->exit)
- ops->exit(ops->flags);
-
- do_gettimeofday(&after);
- local_irq_enable();
- idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
- (after.tv_usec - before.tv_usec);
-
- dev->last_residency = idle_time;
-
- return index;
-}
-
static int __init davinci_cpuidle_probe(struct platform_device *pdev)
{
int ret;
struct cpuidle_device *device;
- struct cpuidle_driver *driver = &davinci_idle_driver;
struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
@@ -123,27 +124,11 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
ddr2_reg_base = pdata->ddr2_ctlr_base;
- /* Wait for interrupt state */
- driver->states[0].enter = davinci_enter_idle;
- driver->states[0].exit_latency = 1;
- driver->states[0].target_residency = 10000;
- driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
- strcpy(driver->states[0].name, "WFI");
- strcpy(driver->states[0].desc, "Wait for interrupt");
-
- /* Wait for interrupt and DDR self refresh state */
- driver->states[1].enter = davinci_enter_idle;
- driver->states[1].exit_latency = 10;
- driver->states[1].target_residency = 10000;
- driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
- strcpy(driver->states[1].name, "DDR SR");
- strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
if (pdata->ddr2_pdown)
davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
- driver->state_count = DAVINCI_CPUIDLE_MAX_STATES;
ret = cpuidle_register_driver(&davinci_idle_driver);
if (ret) {
diff --git a/arch/arm/mach-davinci/include/mach/entry-macro.S b/arch/arm/mach-davinci/include/mach/entry-macro.S
index c1661d2feca9..768b3c060214 100644
--- a/arch/arm/mach-davinci/include/mach/entry-macro.S
+++ b/arch/arm/mach-davinci/include/mach/entry-macro.S
@@ -8,7 +8,6 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
-#include <mach/io.h>
#include <mach/irqs.h>
.macro get_irqnr_preamble, base, tmp
diff --git a/arch/arm/mach-davinci/include/mach/hardware.h b/arch/arm/mach-davinci/include/mach/hardware.h
index 0209b1fc22a1..2184691ebc2f 100644
--- a/arch/arm/mach-davinci/include/mach/hardware.h
+++ b/arch/arm/mach-davinci/include/mach/hardware.h
@@ -30,10 +30,4 @@
#define __IO_ADDRESS(x) ((x) + IO_OFFSET)
#define IO_ADDRESS(pa) IOMEM(__IO_ADDRESS(pa))
-#ifdef __ASSEMBLER__
-#define IOMEM(x) x
-#else
-#define IOMEM(x) ((void __force __iomem *)(x))
-#endif
-
#endif /* __ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-davinci/include/mach/io.h b/arch/arm/mach-davinci/include/mach/io.h
deleted file mode 100644
index b2267d1e1a71..000000000000
--- a/arch/arm/mach-davinci/include/mach/io.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * DaVinci IO address definitions
- *
- * Copied from include/asm/arm/arch-omap/io.h
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#ifndef __ASM_ARCH_IO_H
-#define __ASM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
- */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-#define __mem_isa(a) (a)
-
-#endif /* __ASM_ARCH_IO_H */
diff --git a/arch/arm/mach-davinci/include/mach/uncompress.h b/arch/arm/mach-davinci/include/mach/uncompress.h
index 9dc7cf9664fe..da2fb2c2155a 100644
--- a/arch/arm/mach-davinci/include/mach/uncompress.h
+++ b/arch/arm/mach-davinci/include/mach/uncompress.h
@@ -25,6 +25,8 @@
#include <mach/serial.h>
+#define IOMEM(x) ((void __force __iomem *)(x))
+
u32 *uart;
/* PORT_16C550A, in polled non-fifo mode */
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index e1969ce904dc..75da315b6587 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -19,11 +19,14 @@
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <mach/hardware.h>
+#include <asm/sched_clock.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
+
#include <mach/cputype.h>
+#include <mach/hardware.h>
#include <mach/time.h>
+
#include "clock.h"
static struct clock_event_device clockevent_davinci;
@@ -272,19 +275,9 @@ static cycle_t read_cycles(struct clocksource *cs)
return (cycles_t)timer32_read(t);
}
-/*
- * Kernel assumes that sched_clock can be called early but may not have
- * things ready yet.
- */
-static cycle_t read_dummy(struct clocksource *cs)
-{
- return 0;
-}
-
-
static struct clocksource clocksource_davinci = {
.rating = 300,
- .read = read_dummy,
+ .read = read_cycles,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -292,12 +285,9 @@ static struct clocksource clocksource_davinci = {
/*
* Overwrite weak default sched_clock with something more precise
*/
-unsigned long long notrace sched_clock(void)
+static u32 notrace davinci_read_sched_clock(void)
{
- const cycle_t cyc = clocksource_davinci.read(&clocksource_davinci);
-
- return clocksource_cyc2ns(cyc, clocksource_davinci.mult,
- clocksource_davinci.shift);
+ return timer32_read(&timers[TID_CLOCKSOURCE]);
}
/*
@@ -397,12 +387,14 @@ static void __init davinci_timer_init(void)
davinci_clock_tick_rate = clk_get_rate(timer_clk);
/* setup clocksource */
- clocksource_davinci.read = read_cycles;
clocksource_davinci.name = id_to_name[clocksource_id];
if (clocksource_register_hz(&clocksource_davinci,
davinci_clock_tick_rate))
printk(err, clocksource_davinci.name);
+ setup_sched_clock(davinci_read_sched_clock, 32,
+ davinci_clock_tick_rate);
+
/* setup clockevent */
clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];
clockevent_davinci.mult = div_sc(davinci_clock_tick_rate, NSEC_PER_SEC,
diff --git a/arch/arm/mach-dove/addr-map.c b/arch/arm/mach-dove/addr-map.c
index 98b8c83b09ab..2a06c0163418 100644
--- a/arch/arm/mach-dove/addr-map.c
+++ b/arch/arm/mach-dove/addr-map.c
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <asm/mach/arch.h>
#include <asm/setup.h>
+#include <mach/dove.h>
#include <plat/addr-map.h>
#include "common.h"
diff --git a/arch/arm/mach-dove/include/mach/io.h b/arch/arm/mach-dove/include/mach/io.h
index eb4936ff90ad..29c8b85355a5 100644
--- a/arch/arm/mach-dove/include/mach/io.h
+++ b/arch/arm/mach-dove/include/mach/io.h
@@ -15,6 +15,5 @@
#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
DOVE_PCIE0_IO_VIRT_BASE))
-#define __mem_pci(a) (a)
#endif
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index 8c9f56a3e8ec..6f8068692edf 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -116,6 +116,20 @@ static void __init ebsa110_map_io(void)
iotable_init(ebsa110_io_desc, ARRAY_SIZE(ebsa110_io_desc));
}
+static void __iomem *ebsa110_ioremap_caller(unsigned long cookie, size_t size,
+ unsigned int flags, void *caller)
+{
+ return (void __iomem *)cookie;
+}
+
+static void ebsa110_iounmap(volatile void __iomem *io_addr)
+{}
+
+static void __init ebsa110_init_early(void)
+{
+ arch_ioremap_caller = ebsa110_ioremap_caller;
+ arch_iounmap = ebsa110_iounmap;
+}
#define PIT_CTRL (PIT_BASE + 0x0d)
#define PIT_T2 (PIT_BASE + 0x09)
@@ -312,6 +326,7 @@ MACHINE_START(EBSA110, "EBSA110")
.reserve_lp2 = 1,
.restart_mode = 's',
.map_io = ebsa110_map_io,
+ .init_early = ebsa110_init_early,
.init_irq = ebsa110_init_irq,
.timer = &ebsa110_timer,
.restart = ebsa110_restart,
diff --git a/arch/arm/mach-ebsa110/include/mach/io.h b/arch/arm/mach-ebsa110/include/mach/io.h
index 44679db672fb..11bb0799424b 100644
--- a/arch/arm/mach-ebsa110/include/mach/io.h
+++ b/arch/arm/mach-ebsa110/include/mach/io.h
@@ -62,15 +62,6 @@ void __writel(u32 val, void __iomem *addr);
#define writew(v,b) __writew(v,b)
#define writel(v,b) __writel(v,b)
-static inline void __iomem *__arch_ioremap(unsigned long cookie, size_t size,
- unsigned int flags)
-{
- return (void __iomem *)cookie;
-}
-
-#define __arch_ioremap __arch_ioremap
-#define __arch_iounmap(cookie) do { } while (0)
-
extern void insb(unsigned int port, void *buf, int sz);
extern void insw(unsigned int port, void *buf, int sz);
extern void insl(unsigned int port, void *buf, int sz);
diff --git a/arch/arm/mach-ep93xx/include/mach/io.h b/arch/arm/mach-ep93xx/include/mach/io.h
deleted file mode 100644
index 594b77f21054..000000000000
--- a/arch/arm/mach-ep93xx/include/mach/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/io.h
- */
-
-#ifndef __ASM_MACH_IO_H
-#define __ASM_MACH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(p) __typesafe_io(p)
-#define __mem_pci(p) (p)
-
-/*
- * A typesafe __io() variation for variable initialisers
- */
-#ifdef __ASSEMBLER__
-#define IOMEM(p) p
-#else
-#define IOMEM(p) ((void __iomem __force *)(p))
-#endif
-
-#endif /* __ASM_MACH_IO_H */
diff --git a/arch/arm/mach-exynos/include/mach/io.h b/arch/arm/mach-exynos/include/mach/io.h
deleted file mode 100644
index d5478d247535..000000000000
--- a/arch/arm/mach-exynos/include/mach/io.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* linux/arch/arm/mach-exynos4/include/mach/io.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Copyright 2008-2010 Ben Dooks <ben-linux@fluff.org>
- *
- * Based on arch/arm/mach-s5p6442/include/mach/io.h
- *
- * Default IO routines for EXYNOS4
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H __FILE__
-
-/* No current ISA/PCI bus support. */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#define IO_SPACE_LIMIT (0xFFFFFFFF)
-
-#endif /* __ASM_ARM_ARCH_IO_H */
diff --git a/arch/arm/mach-footbridge/include/mach/io.h b/arch/arm/mach-footbridge/include/mach/io.h
index 15a70396c27d..aba531eebbc6 100644
--- a/arch/arm/mach-footbridge/include/mach/io.h
+++ b/arch/arm/mach-footbridge/include/mach/io.h
@@ -27,18 +27,5 @@
* Translation of various region addresses to virtual addresses
*/
#define __io(a) ((void __iomem *)(PCIO_BASE + (a)))
-#if 1
-#define __mem_pci(a) (a)
-#else
-
-static inline void __iomem *___mem_pci(void __iomem *p)
-{
- unsigned long a = (unsigned long)p;
- BUG_ON(a <= 0xc0000000 || a >= 0xe0000000);
- return p;
-}
-
-#define __mem_pci(a) ___mem_pci(a)
-#endif
#endif
diff --git a/arch/arm/mach-gemini/include/mach/io.h b/arch/arm/mach-gemini/include/mach/io.h
deleted file mode 100644
index c548056b98b2..000000000000
--- a/arch/arm/mach-gemini/include/mach/io.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2001-2006 Storlink, Corp.
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef __MACH_IO_H
-#define __MACH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif /* __MACH_IO_H */
diff --git a/arch/arm/mach-h720x/common.c b/arch/arm/mach-h720x/common.c
index e756d1ac00c2..aa1331e86bcf 100644
--- a/arch/arm/mach-h720x/common.c
+++ b/arch/arm/mach-h720x/common.c
@@ -24,6 +24,7 @@
#include <asm/dma.h>
#include <mach/hardware.h>
#include <asm/irq.h>
+#include <asm/system_misc.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <mach/irqs.h>
diff --git a/arch/arm/mach-h720x/include/mach/io.h b/arch/arm/mach-h720x/include/mach/io.h
deleted file mode 100644
index 2c8659c21a93..000000000000
--- a/arch/arm/mach-h720x/include/mach/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * arch/arm/mach-h720x/include/mach/io.h
- *
- * Copyright (C) 2000 Steve Hill (sjhill@cotw.com)
- *
- * Changelog:
- *
- * 09-19-2001 JJKIM
- * Created from arch/arm/mach-l7200/include/mach/io.h
- *
- * 03-27-2003 Robert Schwebel <r.schwebel@pengutronix.de>:
- * re-unified header files for h720x
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 808b055289b2..410a112bb52e 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -35,7 +35,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
-#include <mach/irqs.h>
#include "core.h"
#include "sysregs.h"
diff --git a/arch/arm/mach-highbank/include/mach/io.h b/arch/arm/mach-highbank/include/mach/io.h
deleted file mode 100644
index 70cfa3ba7697..000000000000
--- a/arch/arm/mach-highbank/include/mach/io.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __MACH_IO_H
-#define __MACH_IO_H
-
-#define __io(a) ({ (void)(a); __typesafe_io(0); })
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-highbank/include/mach/irqs.h b/arch/arm/mach-highbank/include/mach/irqs.h
deleted file mode 100644
index 9746aab14e9a..000000000000
--- a/arch/arm/mach-highbank/include/mach/irqs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __MACH_IRQS_H
-#define __MACH_IRQS_H
-
-#define NR_IRQS 192
-
-#endif
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 52359f80c42d..7561eca131b0 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -1,6 +1,3 @@
-config IMX_HAVE_DMA_V1
- bool
-
config HAVE_IMX_GPC
bool
@@ -38,7 +35,6 @@ config SOC_IMX1
bool
select ARCH_MX1
select CPU_ARM920T
- select IMX_HAVE_DMA_V1
select IMX_HAVE_IOMUX_V1
select MXC_AVIC
@@ -46,7 +42,6 @@ config SOC_IMX21
bool
select MACH_MX21
select CPU_ARM926T
- select IMX_HAVE_DMA_V1
select IMX_HAVE_IOMUX_V1
select MXC_AVIC
@@ -61,7 +56,6 @@ config SOC_IMX27
bool
select MACH_MX27
select CPU_ARM926T
- select IMX_HAVE_DMA_V1
select IMX_HAVE_IOMUX_V1
select MXC_AVIC
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 35fc450fa263..ab939c5046c3 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -1,5 +1,3 @@
-obj-$(CONFIG_IMX_HAVE_DMA_V1) += dma-v1.o
-
obj-$(CONFIG_SOC_IMX1) += clock-imx1.o mm-imx1.o
obj-$(CONFIG_SOC_IMX21) += clock-imx21.o mm-imx21.o
diff --git a/arch/arm/mach-imx/dma-v1.c b/arch/arm/mach-imx/dma-v1.c
deleted file mode 100644
index 3189a6004cf9..000000000000
--- a/arch/arm/mach-imx/dma-v1.c
+++ /dev/null
@@ -1,845 +0,0 @@
-/*
- * linux/arch/arm/plat-mxc/dma-v1.c
- *
- * i.MX DMA registration and IRQ dispatching
- *
- * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
- * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
- * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/scatterlist.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-#include <mach/dma-v1.h>
-
-#define DMA_DCR 0x00 /* Control Register */
-#define DMA_DISR 0x04 /* Interrupt status Register */
-#define DMA_DIMR 0x08 /* Interrupt mask Register */
-#define DMA_DBTOSR 0x0c /* Burst timeout status Register */
-#define DMA_DRTOSR 0x10 /* Request timeout Register */
-#define DMA_DSESR 0x14 /* Transfer Error Status Register */
-#define DMA_DBOSR 0x18 /* Buffer overflow status Register */
-#define DMA_DBTOCR 0x1c /* Burst timeout control Register */
-#define DMA_WSRA 0x40 /* W-Size Register A */
-#define DMA_XSRA 0x44 /* X-Size Register A */
-#define DMA_YSRA 0x48 /* Y-Size Register A */
-#define DMA_WSRB 0x4c /* W-Size Register B */
-#define DMA_XSRB 0x50 /* X-Size Register B */
-#define DMA_YSRB 0x54 /* Y-Size Register B */
-#define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */
-#define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */
-#define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
-#define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */
-#define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
-#define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */
-#define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
-#define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
-#define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */
-
-#define DCR_DRST (1<<1)
-#define DCR_DEN (1<<0)
-#define DBTOCR_EN (1<<15)
-#define DBTOCR_CNT(x) ((x) & 0x7fff)
-#define CNTR_CNT(x) ((x) & 0xffffff)
-#define CCR_ACRPT (1<<14)
-#define CCR_DMOD_LINEAR (0x0 << 12)
-#define CCR_DMOD_2D (0x1 << 12)
-#define CCR_DMOD_FIFO (0x2 << 12)
-#define CCR_DMOD_EOBFIFO (0x3 << 12)
-#define CCR_SMOD_LINEAR (0x0 << 10)
-#define CCR_SMOD_2D (0x1 << 10)
-#define CCR_SMOD_FIFO (0x2 << 10)
-#define CCR_SMOD_EOBFIFO (0x3 << 10)
-#define CCR_MDIR_DEC (1<<9)
-#define CCR_MSEL_B (1<<8)
-#define CCR_DSIZ_32 (0x0 << 6)
-#define CCR_DSIZ_8 (0x1 << 6)
-#define CCR_DSIZ_16 (0x2 << 6)
-#define CCR_SSIZ_32 (0x0 << 4)
-#define CCR_SSIZ_8 (0x1 << 4)
-#define CCR_SSIZ_16 (0x2 << 4)
-#define CCR_REN (1<<3)
-#define CCR_RPT (1<<2)
-#define CCR_FRC (1<<1)
-#define CCR_CEN (1<<0)
-#define RTOR_EN (1<<15)
-#define RTOR_CLK (1<<14)
-#define RTOR_PSC (1<<13)
-
-/*
- * struct imx_dma_channel - i.MX specific DMA extension
- * @name: name specified by DMA client
- * @irq_handler: client callback for end of transfer
- * @err_handler: client callback for error condition
- * @data: clients context data for callbacks
- * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE
- * @sg: pointer to the actual read/written chunk for scatter-gather emulation
- * @resbytes: total residual number of bytes to transfer
- * (it can be lower or same as sum of SG mapped chunk sizes)
- * @sgcount: number of chunks to be read/written
- *
- * The structure is used for i.MX DMA processing. It would probably be
- * better to use @struct dma_struct for external interfacing in the
- * future and keep @struct imx_dma_channel only as an extension to it.
- */
-
-struct imx_dma_channel {
- const char *name;
- void (*irq_handler) (int, void *);
- void (*err_handler) (int, void *, int errcode);
- void (*prog_handler) (int, void *, struct scatterlist *);
- void *data;
- unsigned int dma_mode;
- struct scatterlist *sg;
- unsigned int resbytes;
- int dma_num;
-
- int in_use;
-
- u32 ccr_from_device;
- u32 ccr_to_device;
-
- struct timer_list watchdog;
-
- int hw_chaining;
-};
-
-static void __iomem *imx_dmav1_baseaddr;
-
-static void imx_dmav1_writel(unsigned val, unsigned offset)
-{
- __raw_writel(val, imx_dmav1_baseaddr + offset);
-}
-
-static unsigned imx_dmav1_readl(unsigned offset)
-{
- return __raw_readl(imx_dmav1_baseaddr + offset);
-}
-
-static struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];
-
-static struct clk *dma_clk;
-
-static int imx_dma_hw_chain(struct imx_dma_channel *imxdma)
-{
- if (cpu_is_mx27())
- return imxdma->hw_chaining;
- else
- return 0;
-}
-
-/*
- * imx_dma_sg_next - prepare next chunk for scatter-gather DMA emulation
- */
-static inline int imx_dma_sg_next(int channel, struct scatterlist *sg)
-{
- struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
- unsigned long now;
-
- if (!imxdma->name) {
- printk(KERN_CRIT "%s: called for not allocated channel %d\n",
- __func__, channel);
- return 0;
- }
-
- now = min(imxdma->resbytes, sg->length);
- if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
- imxdma->resbytes -= now;
-
- if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
- imx_dmav1_writel(sg->dma_address, DMA_DAR(channel));
- else
- imx_dmav1_writel(sg->dma_address, DMA_SAR(channel));
-
- imx_dmav1_writel(now, DMA_CNTR(channel));
-
- pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
- "size 0x%08x\n", channel,
- imx_dmav1_readl(DMA_DAR(channel)),
- imx_dmav1_readl(DMA_SAR(channel)),
- imx_dmav1_readl(DMA_CNTR(channel)));
-
- return now;
-}
-
-/**
- * imx_dma_setup_single - setup i.MX DMA channel for linear memory to/from
- * device transfer
- *
- * @channel: i.MX DMA channel number
- * @dma_address: the DMA/physical memory address of the linear data block
- * to transfer
- * @dma_length: length of the data block in bytes
- * @dev_addr: physical device port address
- * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
- * or %DMA_MODE_WRITE from memory to the device
- *
- * Return value: if incorrect parameters are provided -%EINVAL.
- * Zero indicates success.
- */
-int
-imx_dma_setup_single(int channel, dma_addr_t dma_address,
- unsigned int dma_length, unsigned int dev_addr,
- unsigned int dmamode)
-{
- struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
-
- imxdma->sg = NULL;
- imxdma->dma_mode = dmamode;
-
- if (!dma_address) {
- printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
- channel);
- return -EINVAL;
- }
-
- if (!dma_length) {
- printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
- channel);
- return -EINVAL;
- }
-
- if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
- pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
- "dev_addr=0x%08x for read\n",
- channel, __func__, (unsigned int)dma_address,
- dma_length, dev_addr);
-
- imx_dmav1_writel(dev_addr, DMA_SAR(channel));
- imx_dmav1_writel(dma_address, DMA_DAR(channel));
- imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
- } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
- pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
- "dev_addr=0x%08x for write\n",
- channel, __func__, (unsigned int)dma_address,
- dma_length, dev_addr);
-
- imx_dmav1_writel(dma_address, DMA_SAR(channel));
- imx_dmav1_writel(dev_addr, DMA_DAR(channel));
- imx_dmav1_writel(imxdma->ccr_to_device,
- DMA_CCR(channel));
- } else {
- printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
- channel);
- return -EINVAL;
- }
-
- imx_dmav1_writel(dma_length, DMA_CNTR(channel));
-
- return 0;
-}
-EXPORT_SYMBOL(imx_dma_setup_single);
-
-/**
- * imx_dma_setup_sg - setup i.MX DMA channel SG list to/from device transfer
- * @channel: i.MX DMA channel number
- * @sg: pointer to the scatter-gather list/vector
- * @sgcount: number of entries in the scatter-gather list
- * @dma_length: total length of the transfer request in bytes
- * @dev_addr: physical device port address
- * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
- * or %DMA_MODE_WRITE from memory to the device
- *
- * The function sets up DMA channel state and registers to be ready for
- * transfer specified by provided parameters. The scatter-gather emulation
- * is set up according to the parameters.
- *
- * The full preparation of the transfer requires setup of more registers
- * by the caller before imx_dma_enable() can be called.
- *
- * %BLR(channel) holds transfer burst length in bytes, 0 means 64 bytes
- *
- * %RSSR(channel) has to be set to the DMA request line source %DMA_REQ_xxx
- *
- * %CCR(channel) has to specify the transfer parameters; the following
- * settings are typical for linear or simple scatter-gather transfers when
- * %DMA_MODE_READ is specified
- *
- * %CCR_DMOD_LINEAR | %CCR_DSIZ_32 | %CCR_SMOD_FIFO | %CCR_SSIZ_x
- *
- * The typical setup for %DMA_MODE_WRITE is the following combination of
- * options
- *
- * %CCR_SMOD_LINEAR | %CCR_SSIZ_32 | %CCR_DMOD_FIFO | %CCR_DSIZ_x
- *
- * Be careful here not to mix up the source and destination port size
- * constants; they are really different:
- * %CCR_SSIZ_8, %CCR_SSIZ_16, %CCR_SSIZ_32,
- * %CCR_DSIZ_8, %CCR_DSIZ_16, %CCR_DSIZ_32
- *
- * Return value: if incorrect parameters are provided -%EINVAL.
- * Zero indicates success.
- */
-int
-imx_dma_setup_sg(int channel,
- struct scatterlist *sg, unsigned int sgcount,
- unsigned int dma_length, unsigned int dev_addr,
- unsigned int dmamode)
-{
- struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
-
- if (imxdma->in_use)
- return -EBUSY;
-
- imxdma->sg = sg;
- imxdma->dma_mode = dmamode;
- imxdma->resbytes = dma_length;
-
- if (!sg || !sgcount) {
- printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
- channel);
- return -EINVAL;
- }
-
- if (!sg->length) {
- printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
- channel);
- return -EINVAL;
- }
-
- if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
- pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
- "dev_addr=0x%08x for read\n",
- channel, __func__, sg, sgcount, dma_length, dev_addr);
-
- imx_dmav1_writel(dev_addr, DMA_SAR(channel));
- imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
- } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
- pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
- "dev_addr=0x%08x for write\n",
- channel, __func__, sg, sgcount, dma_length, dev_addr);
-
- imx_dmav1_writel(dev_addr, DMA_DAR(channel));
- imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel));
- } else {
- printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
- channel);
- return -EINVAL;
- }
-
- imx_dma_sg_next(channel, sg);
-
- return 0;
-}
-EXPORT_SYMBOL(imx_dma_setup_sg);
-
-int
-imx_dma_config_channel(int channel, unsigned int config_port,
- unsigned int config_mem, unsigned int dmareq, int hw_chaining)
-{
- struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
- u32 dreq = 0;
-
- imxdma->hw_chaining = 0;
-
- if (hw_chaining) {
- imxdma->hw_chaining = 1;
- if (!imx_dma_hw_chain(imxdma))
- return -EINVAL;
- }
-
- if (dmareq)
- dreq = CCR_REN;
-
- imxdma->ccr_from_device = config_port | (config_mem << 2) | dreq;
- imxdma->ccr_to_device = config_mem | (config_port << 2) | dreq;
-
- imx_dmav1_writel(dmareq, DMA_RSSR(channel));
-
- return 0;
-}
-EXPORT_SYMBOL(imx_dma_config_channel);
-
-void imx_dma_config_burstlen(int channel, unsigned int burstlen)
-{
- imx_dmav1_writel(burstlen, DMA_BLR(channel));
-}
-EXPORT_SYMBOL(imx_dma_config_burstlen);
-
-/**
- * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification
- * handlers
- * @channel: i.MX DMA channel number
- * @irq_handler: the pointer to the function called if the transfer
- * ends successfully
- * @err_handler: the pointer to the function called if the transfer
- *		ends prematurely due to an error
- * @data: user specified value to be passed to the handlers
- */
-int
-imx_dma_setup_handlers(int channel,
- void (*irq_handler) (int, void *),
- void (*err_handler) (int, void *, int),
- void *data)
-{
- struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
- unsigned long flags;
-
- if (!imxdma->name) {
- printk(KERN_CRIT "%s: called for not allocated channel %d\n",
- __func__, channel);
- return -ENODEV;
- }
-
- local_irq_save(flags);
- imx_dmav1_writel(1 << channel, DMA_DISR);
- imxdma->irq_handler = irq_handler;
- imxdma->err_handler = err_handler;
- imxdma->data = data;
- local_irq_restore(flags);
- return 0;
-}
-EXPORT_SYMBOL(imx_dma_setup_handlers);
-
-/**
- * imx_dma_setup_progression_handler - setup i.MX DMA channel progression
- * handlers
- * @channel: i.MX DMA channel number
- * @prog_handler: the pointer to the function called if the transfer progresses
- */
-int
-imx_dma_setup_progression_handler(int channel,
- void (*prog_handler) (int, void*, struct scatterlist*))
-{
- struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
- unsigned long flags;
-
- if (!imxdma->name) {
- printk(KERN_CRIT "%s: called for not allocated channel %d\n",
- __func__, channel);
- return -ENODEV;
- }
-
- local_irq_save(flags);
- imxdma->prog_handler = prog_handler;
- local_irq_restore(flags);
- return 0;
-}
-EXPORT_SYMBOL(imx_dma_setup_progression_handler);
-
-/**
- * imx_dma_enable - function to start i.MX DMA channel operation
- * @channel: i.MX DMA channel number
- *
- * The channel has to be allocated by the driver through the
- * imx_dma_request() or imx_dma_request_by_prio() function.
- * The transfer parameters have to be written to the channel registers
- * through a call to imx_dma_setup_single() or imx_dma_setup_sg(), and the
- * %BLR(channel), %RSSR(channel) and %CCR(channel) registers have to be
- * set up by the channel user prior to this call.
- */
-void imx_dma_enable(int channel)
-{
- struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
- unsigned long flags;
-
- pr_debug("imxdma%d: imx_dma_enable\n", channel);
-
- if (!imxdma->name) {
- printk(KERN_CRIT "%s: called for not allocated channel %d\n",
- __func__, channel);
- return;
- }
-
- if (imxdma->in_use)
- return;
-
- local_irq_save(flags);
-
- imx_dmav1_writel(1 << channel, DMA_DISR);
- imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
- imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
- CCR_ACRPT, DMA_CCR(channel));
-
- if ((cpu_is_mx21() || cpu_is_mx27()) &&
- imxdma->sg && imx_dma_hw_chain(imxdma)) {
- imxdma->sg = sg_next(imxdma->sg);
- if (imxdma->sg) {
- u32 tmp;
- imx_dma_sg_next(channel, imxdma->sg);
- tmp = imx_dmav1_readl(DMA_CCR(channel));
- imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
- DMA_CCR(channel));
- }
- }
- imxdma->in_use = 1;
-
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL(imx_dma_enable);
-
-/**
- * imx_dma_disable - stop, finish i.MX DMA channel operation
- * @channel: i.MX DMA channel number
- */
-void imx_dma_disable(int channel)
-{
- struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
- unsigned long flags;
-
- pr_debug("imxdma%d: imx_dma_disable\n", channel);
-
- if (imx_dma_hw_chain(imxdma))
- del_timer(&imxdma->watchdog);
-
- local_irq_save(flags);
- imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
- imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
- DMA_CCR(channel));
- imx_dmav1_writel(1 << channel, DMA_DISR);
- imxdma->in_use = 0;
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL(imx_dma_disable);
-
-static void imx_dma_watchdog(unsigned long chno)
-{
- struct imx_dma_channel *imxdma = &imx_dma_channels[chno];
-
- imx_dmav1_writel(0, DMA_CCR(chno));
- imxdma->in_use = 0;
- imxdma->sg = NULL;
-
- if (imxdma->err_handler)
- imxdma->err_handler(chno, imxdma->data, IMX_DMA_ERR_TIMEOUT);
-}
-
-static irqreturn_t dma_err_handler(int irq, void *dev_id)
-{
- int i, disr;
- struct imx_dma_channel *imxdma;
- unsigned int err_mask;
- int errcode;
-
- disr = imx_dmav1_readl(DMA_DISR);
-
- err_mask = imx_dmav1_readl(DMA_DBTOSR) |
- imx_dmav1_readl(DMA_DRTOSR) |
- imx_dmav1_readl(DMA_DSESR) |
- imx_dmav1_readl(DMA_DBOSR);
-
- if (!err_mask)
- return IRQ_HANDLED;
-
- imx_dmav1_writel(disr & err_mask, DMA_DISR);
-
- for (i = 0; i < IMX_DMA_CHANNELS; i++) {
- if (!(err_mask & (1 << i)))
- continue;
- imxdma = &imx_dma_channels[i];
- errcode = 0;
-
- if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
- imx_dmav1_writel(1 << i, DMA_DBTOSR);
- errcode |= IMX_DMA_ERR_BURST;
- }
- if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
- imx_dmav1_writel(1 << i, DMA_DRTOSR);
- errcode |= IMX_DMA_ERR_REQUEST;
- }
- if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
- imx_dmav1_writel(1 << i, DMA_DSESR);
- errcode |= IMX_DMA_ERR_TRANSFER;
- }
- if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
- imx_dmav1_writel(1 << i, DMA_DBOSR);
- errcode |= IMX_DMA_ERR_BUFFER;
- }
- if (imxdma->name && imxdma->err_handler) {
- imxdma->err_handler(i, imxdma->data, errcode);
- continue;
- }
-
- imx_dma_channels[i].sg = NULL;
-
- printk(KERN_WARNING
- "DMA timeout on channel %d (%s) -%s%s%s%s\n",
- i, imxdma->name,
- errcode & IMX_DMA_ERR_BURST ? " burst" : "",
- errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
- errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
- errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
- }
- return IRQ_HANDLED;
-}
-
-static void dma_irq_handle_channel(int chno)
-{
- struct imx_dma_channel *imxdma = &imx_dma_channels[chno];
-
- if (!imxdma->name) {
- /*
- * IRQ for an unregistered DMA channel:
- * let's clear the interrupts and disable it.
- */
- printk(KERN_WARNING
- "spurious IRQ for DMA channel %d\n", chno);
- return;
- }
-
- if (imxdma->sg) {
- u32 tmp;
- struct scatterlist *current_sg = imxdma->sg;
- imxdma->sg = sg_next(imxdma->sg);
-
- if (imxdma->sg) {
- imx_dma_sg_next(chno, imxdma->sg);
-
- tmp = imx_dmav1_readl(DMA_CCR(chno));
-
- if (imx_dma_hw_chain(imxdma)) {
- /* FIXME: The timeout should probably be
- * configurable
- */
- mod_timer(&imxdma->watchdog,
- jiffies + msecs_to_jiffies(500));
-
- tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
- imx_dmav1_writel(tmp, DMA_CCR(chno));
- } else {
- imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
- tmp |= CCR_CEN;
- }
-
- imx_dmav1_writel(tmp, DMA_CCR(chno));
-
- if (imxdma->prog_handler)
- imxdma->prog_handler(chno, imxdma->data,
- current_sg);
-
- return;
- }
-
- if (imx_dma_hw_chain(imxdma)) {
- del_timer(&imxdma->watchdog);
- return;
- }
- }
-
- imx_dmav1_writel(0, DMA_CCR(chno));
- imxdma->in_use = 0;
- if (imxdma->irq_handler)
- imxdma->irq_handler(chno, imxdma->data);
-}
-
-static irqreturn_t dma_irq_handler(int irq, void *dev_id)
-{
- int i, disr;
-
- if (cpu_is_mx21() || cpu_is_mx27())
- dma_err_handler(irq, dev_id);
-
- disr = imx_dmav1_readl(DMA_DISR);
-
- pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
- disr);
-
- imx_dmav1_writel(disr, DMA_DISR);
- for (i = 0; i < IMX_DMA_CHANNELS; i++) {
- if (disr & (1 << i))
- dma_irq_handle_channel(i);
- }
-
- return IRQ_HANDLED;
-}
-
-/**
- * imx_dma_request - request/allocate specified channel number
- * @channel: i.MX DMA channel number
- * @name: the driver/caller own non-%NULL identification
- */
-int imx_dma_request(int channel, const char *name)
-{
- struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
- unsigned long flags;
- int ret = 0;
-
- /* basic sanity checks */
- if (!name)
- return -EINVAL;
-
- if (channel >= IMX_DMA_CHANNELS) {
- printk(KERN_CRIT "%s: called for non-existed channel %d\n",
- __func__, channel);
- return -EINVAL;
- }
-
- local_irq_save(flags);
- if (imxdma->name) {
- local_irq_restore(flags);
- return -EBUSY;
- }
- memset(imxdma, 0, sizeof(*imxdma));
- imxdma->name = name;
- local_irq_restore(flags); /* request_irq() can block */
-
- if (cpu_is_mx21() || cpu_is_mx27()) {
- ret = request_irq(MX2x_INT_DMACH0 + channel,
- dma_irq_handler, 0, "DMA", NULL);
- if (ret) {
- imxdma->name = NULL;
- pr_crit("Can't register IRQ %d for DMA channel %d\n",
- MX2x_INT_DMACH0 + channel, channel);
- return ret;
- }
- init_timer(&imxdma->watchdog);
- imxdma->watchdog.function = &imx_dma_watchdog;
- imxdma->watchdog.data = channel;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(imx_dma_request);
-
-/**
- * imx_dma_free - release previously acquired channel
- * @channel: i.MX DMA channel number
- */
-void imx_dma_free(int channel)
-{
- unsigned long flags;
- struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
-
- if (!imxdma->name) {
- printk(KERN_CRIT
- "%s: trying to free free channel %d\n",
- __func__, channel);
- return;
- }
-
- local_irq_save(flags);
- /* Disable interrupts */
- imx_dma_disable(channel);
- imxdma->name = NULL;
-
- if (cpu_is_mx21() || cpu_is_mx27())
- free_irq(MX2x_INT_DMACH0 + channel, NULL);
-
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL(imx_dma_free);
-
-/**
- * imx_dma_request_by_prio - find and request a free channel that best
- * suits the requested priority
- * @name: the driver/caller own non-%NULL identification
- * @prio: the requested priority group (%DMA_PRIO_HIGH, %DMA_PRIO_MEDIUM
- *	or %DMA_PRIO_LOW)
- *
- * This function tries to find a free channel in the specified priority
- * group. If none is available there, it looks for a free channel first in
- * the higher and then in the lower priority groups.
- *
- * Return value: If there is no free channel to allocate, -%ENODEV is returned.
- *               On successful allocation the channel number is returned.
- */
-int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio)
-{
- int i;
- int best;
-
- switch (prio) {
- case (DMA_PRIO_HIGH):
- best = 8;
- break;
- case (DMA_PRIO_MEDIUM):
- best = 4;
- break;
- case (DMA_PRIO_LOW):
- default:
- best = 0;
- break;
- }
-
- for (i = best; i < IMX_DMA_CHANNELS; i++)
- if (!imx_dma_request(i, name))
- return i;
-
- for (i = best - 1; i >= 0; i--)
- if (!imx_dma_request(i, name))
- return i;
-
- printk(KERN_ERR "%s: no free DMA channel found\n", __func__);
-
- return -ENODEV;
-}
-EXPORT_SYMBOL(imx_dma_request_by_prio);
-
-static int __init imx_dma_init(void)
-{
- int ret = 0;
- int i;
-
- if (cpu_is_mx1())
- imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
- else if (cpu_is_mx21())
- imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
- else if (cpu_is_mx27())
- imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
- else
- return 0;
-
- dma_clk = clk_get(NULL, "dma");
- if (IS_ERR(dma_clk))
- return PTR_ERR(dma_clk);
- clk_enable(dma_clk);
-
- /* reset DMA module */
- imx_dmav1_writel(DCR_DRST, DMA_DCR);
-
- if (cpu_is_mx1()) {
- ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", NULL);
- if (ret) {
- pr_crit("Wow! Can't register IRQ for DMA\n");
- return ret;
- }
-
- ret = request_irq(MX1_DMA_ERR, dma_err_handler, 0, "DMA", NULL);
- if (ret) {
- pr_crit("Wow! Can't register ERRIRQ for DMA\n");
- free_irq(MX1_DMA_INT, NULL);
- return ret;
- }
- }
-
- /* enable DMA module */
- imx_dmav1_writel(DCR_DEN, DMA_DCR);
-
- /* clear all interrupts */
- imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
-
- /* disable interrupts */
- imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
-
- for (i = 0; i < IMX_DMA_CHANNELS; i++) {
- imx_dma_channels[i].sg = NULL;
- imx_dma_channels[i].dma_num = i;
- }
-
- return ret;
-}
-
-arch_initcall(imx_dma_init);
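
For context on the interface removed above: the kernel-doc comments describe a
channel-based client API (request a channel, configure it, set handlers,
program a transfer, enable). The following is a minimal, hypothetical sketch of
how a driver would have driven a single linear memory-to-device transfer with
that API, pieced together from the prototypes and comments in this file.
MY_DEV_DMA_REQ, the port/memory size selection and the burst length are
illustrative placeholders, not values taken from any real board.

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <mach/dma-v1.h>

static void my_dma_done(int channel, void *data)
{
	/* transfer completed without error */
}

static void my_dma_error(int channel, void *data, int errcode)
{
	pr_err("DMA error on channel %d, errcode %#x\n", channel, errcode);
}

static int my_start_tx(dma_addr_t buf_dma, unsigned int buf_len,
		       unsigned int dev_fifo_phys)
{
	int channel, ret;

	channel = imx_dma_request_by_prio("my-driver", DMA_PRIO_MEDIUM);
	if (channel < 0)
		return channel;

	/*
	 * 16-bit FIFO on the device port, 32-bit linear accesses on memory;
	 * MY_DEV_DMA_REQ stands in for the device's DMA request line.
	 */
	ret = imx_dma_config_channel(channel,
				     IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
				     IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
				     MY_DEV_DMA_REQ, 0);
	if (ret)
		goto out_free;

	imx_dma_config_burstlen(channel, 16);
	imx_dma_setup_handlers(channel, my_dma_done, my_dma_error, NULL);

	ret = imx_dma_setup_single(channel, buf_dma, buf_len,
				   dev_fifo_phys, DMA_MODE_WRITE);
	if (ret)
		goto out_free;

	imx_dma_enable(channel);
	return channel;

out_free:
	imx_dma_free(channel);
	return ret;
}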
diff --git a/arch/arm/mach-imx/include/mach/dma-v1.h b/arch/arm/mach-imx/include/mach/dma-v1.h
deleted file mode 100644
index ac6fd713828a..000000000000
--- a/arch/arm/mach-imx/include/mach/dma-v1.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * linux/arch/arm/mach-imx/include/mach/dma-v1.h
- *
- * i.MX DMA registration and IRQ dispatching
- *
- * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
- * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
- * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __MACH_DMA_V1_H__
-#define __MACH_DMA_V1_H__
-
-#define imx_has_dma_v1() (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
-
-#include <mach/dma.h>
-
-#define IMX_DMA_CHANNELS 16
-
-#define DMA_MODE_READ 0
-#define DMA_MODE_WRITE 1
-#define DMA_MODE_MASK 1
-
-#define MX1_DMA_REG(offset) MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR + (offset))
-
-/* DMA Interrupt Mask Register */
-#define MX1_DMA_DIMR MX1_DMA_REG(0x08)
-
-/* Channel Control Register */
-#define MX1_DMA_CCR(x) MX1_DMA_REG(0x8c + ((x) << 6))
-
-#define IMX_DMA_MEMSIZE_32 (0 << 4)
-#define IMX_DMA_MEMSIZE_8 (1 << 4)
-#define IMX_DMA_MEMSIZE_16 (2 << 4)
-#define IMX_DMA_TYPE_LINEAR (0 << 10)
-#define IMX_DMA_TYPE_2D (1 << 10)
-#define IMX_DMA_TYPE_FIFO (2 << 10)
-
-#define IMX_DMA_ERR_BURST (1 << 0)
-#define IMX_DMA_ERR_REQUEST (1 << 1)
-#define IMX_DMA_ERR_TRANSFER (1 << 2)
-#define IMX_DMA_ERR_BUFFER (1 << 3)
-#define IMX_DMA_ERR_TIMEOUT (1 << 4)
-
-int
-imx_dma_config_channel(int channel, unsigned int config_port,
- unsigned int config_mem, unsigned int dmareq, int hw_chaining);
-
-void
-imx_dma_config_burstlen(int channel, unsigned int burstlen);
-
-int
-imx_dma_setup_single(int channel, dma_addr_t dma_address,
- unsigned int dma_length, unsigned int dev_addr,
- unsigned int dmamode);
-
-
-/*
- * Use this flag as the dma_length argument to imx_dma_setup_sg()
- * to create an endlessly running DMA loop. The end of the scatterlist
- * must be linked to the beginning for this to work.
- */
-#define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)
-
-int
-imx_dma_setup_sg(int channel, struct scatterlist *sg,
- unsigned int sgcount, unsigned int dma_length,
- unsigned int dev_addr, unsigned int dmamode);
-
-int
-imx_dma_setup_handlers(int channel,
- void (*irq_handler) (int, void *),
- void (*err_handler) (int, void *, int), void *data);
-
-int
-imx_dma_setup_progression_handler(int channel,
- void (*prog_handler) (int, void*, struct scatterlist*));
-
-void imx_dma_enable(int channel);
-
-void imx_dma_disable(int channel);
-
-int imx_dma_request(int channel, const char *name);
-
-void imx_dma_free(int channel);
-
-int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);
-
-#endif /* __MACH_DMA_V1_H__ */
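
The header above also declares a scatter-gather variant and the
IMX_DMA_LENGTH_LOOP flag. A hedged sketch of a scatter-gather read built on
those prototypes follows; the channel is assumed to have been requested and
configured as in the previous sketch, and MY_DEV_FIFO_PHYS is a placeholder
for the device's FIFO address.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <mach/dma-v1.h>

static void my_sg_progress(int channel, void *data, struct scatterlist *sg)
{
	/* one chunk finished; e.g. advance a ring-buffer pointer */
}

static int my_start_sg_rx(struct device *dev, int chan,
			  struct scatterlist *sgl, int nents,
			  unsigned int total_len)
{
	int mapped, ret;

	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;

	imx_dma_setup_progression_handler(chan, my_sg_progress);

	/*
	 * Passing IMX_DMA_LENGTH_LOOP instead of total_len would keep the
	 * transfer running endlessly, provided the scatterlist is linked
	 * back to its beginning as required by the comment above.
	 */
	ret = imx_dma_setup_sg(chan, sgl, mapped, total_len,
			       MY_DEV_FIFO_PHYS, DMA_MODE_READ);
	if (ret) {
		dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
		return ret;
	}

	imx_dma_enable(chan);
	return 0;
}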
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index f8ca96c354f2..74127389e7ab 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -21,6 +21,7 @@
#include <linux/err.h>
#include <asm/pgtable.h>
+#include <asm/system_misc.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/map.h>
@@ -61,8 +62,8 @@ static void imx3_idle(void)
: "=r" (reg));
}
-static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size,
- unsigned int mtype)
+static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
if (mtype == MT_DEVICE) {
/*
@@ -75,7 +76,7 @@ static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size,
mtype = MT_DEVICE_NONSHARED;
}
- return __arm_ioremap(phys_addr, size, mtype);
+ return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}
void __init imx3_init_l2x0(void)
@@ -134,7 +135,7 @@ void __init imx31_init_early(void)
{
mxc_set_cpu_type(MXC_CPU_MX31);
mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
- imx_ioremap = imx3_ioremap;
+ arch_ioremap_caller = imx3_ioremap_caller;
arm_pm_idle = imx3_idle;
}
@@ -208,7 +209,7 @@ void __init imx35_init_early(void)
mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
arm_pm_idle = imx3_idle;
- imx_ioremap = imx3_ioremap;
+ arch_ioremap_caller = imx3_ioremap_caller;
}
void __init mx35_init_irq(void)
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index 51af9fa56944..05250aed61fb 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/clk.h>
+#include <asm/system_misc.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 1a65d77bd55d..eaf6c6366ffa 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -25,8 +25,9 @@
#include <mach/hardware.h>
#include <mach/platform.h>
-#include <asm/irq.h>
#include <mach/cm.h>
+#include <mach/irqs.h>
+
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/mach/time.h>
diff --git a/arch/arm/mach-integrator/include/mach/io.h b/arch/arm/mach-integrator/include/mach/io.h
index 37beed3fa3ed..8de70de3dd0a 100644
--- a/arch/arm/mach-integrator/include/mach/io.h
+++ b/arch/arm/mach-integrator/include/mach/io.h
@@ -29,6 +29,5 @@
#define PCI_IO_VADDR 0xee000000
#define __io(a) ((void __iomem *)(PCI_IO_VADDR + (a)))
-#define __mem_pci(a) (a)
#endif
diff --git a/arch/arm/mach-integrator/include/mach/irqs.h b/arch/arm/mach-integrator/include/mach/irqs.h
index 1fbe6d190222..a19a1a2fcf6b 100644
--- a/arch/arm/mach-integrator/include/mach/irqs.h
+++ b/arch/arm/mach-integrator/include/mach/irqs.h
@@ -78,5 +78,6 @@
#define IRQ_SIC_CP_LMINT7 46
#define IRQ_SIC_END 46
-#define NR_IRQS 47
+#define NR_IRQS_INTEGRATOR_AP 34
+#define NR_IRQS_INTEGRATOR_CP 47
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 21a1d6cbef40..871f148ffd72 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -38,12 +38,13 @@
#include <mach/hardware.h>
#include <mach/platform.h>
#include <asm/hardware/arm_timer.h>
-#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/param.h> /* HZ */
#include <asm/mach-types.h>
+#include <asm/sched_clock.h>
#include <mach/lm.h>
+#include <mach/irqs.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
@@ -325,6 +326,11 @@ static void __init ap_init(void)
static unsigned long timer_reload;
+static u32 notrace integrator_read_sched_clock(void)
+{
+ return -readl((void __iomem *) TIMER2_VA_BASE + TIMER_VALUE);
+}
+
static void integrator_clocksource_init(unsigned long inrate)
{
void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
@@ -341,6 +347,7 @@ static void integrator_clocksource_init(unsigned long inrate)
clocksource_mmio_init(base + TIMER_VALUE, "timer2",
rate, 200, 16, clocksource_mmio_readl_down);
+ setup_sched_clock(integrator_read_sched_clock, 16, rate);
}
static void __iomem * const clkevt_base = (void __iomem *)TIMER1_VA_BASE;
@@ -468,6 +475,7 @@ MACHINE_START(INTEGRATOR, "ARM-Integrator")
.atag_offset = 0x100,
.reserve = integrator_reserve,
.map_io = ap_map_io,
+ .nr_irqs = NR_IRQS_INTEGRATOR_AP,
.init_early = integrator_init_early,
.init_irq = ap_init_irq,
.timer = &ap_timer,
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index be9ead4a3bcc..48a115a91d9d 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -26,7 +26,6 @@
#include <mach/hardware.h>
#include <mach/platform.h>
-#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/hardware/arm_timer.h>
@@ -34,6 +33,7 @@
#include <mach/cm.h>
#include <mach/lm.h>
+#include <mach/irqs.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
@@ -464,6 +464,7 @@ MACHINE_START(CINTEGRATOR, "ARM-IntegratorCP")
.atag_offset = 0x100,
.reserve = integrator_reserve,
.map_io = intcp_map_io,
+ .nr_irqs = NR_IRQS_INTEGRATOR_CP,
.init_early = intcp_init_early,
.init_irq = intcp_init_irq,
.timer = &cp_timer,
diff --git a/arch/arm/mach-integrator/pci.c b/arch/arm/mach-integrator/pci.c
index 36068f438f2b..f1ca9c122861 100644
--- a/arch/arm/mach-integrator/pci.c
+++ b/arch/arm/mach-integrator/pci.c
@@ -26,10 +26,11 @@
#include <linux/interrupt.h>
#include <linux/init.h>
-#include <asm/irq.h>
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
+#include <mach/irqs.h>
+
/*
* A small note about bridges and interrupts. The DECchip 21050 (and
* later) adheres to the PCI-PCI bridge specification. This says that
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index 4be172c3cbe0..67e6f9a9d1a0 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -30,7 +30,8 @@
#include <mach/hardware.h>
#include <mach/platform.h>
-#include <asm/irq.h>
+#include <mach/irqs.h>
+
#include <asm/signal.h>
#include <asm/mach/pci.h>
#include <asm/irq_regs.h>
diff --git a/arch/arm/mach-iop13xx/include/mach/io.h b/arch/arm/mach-iop13xx/include/mach/io.h
index dffb234bb967..f13188518025 100644
--- a/arch/arm/mach-iop13xx/include/mach/io.h
+++ b/arch/arm/mach-iop13xx/include/mach/io.h
@@ -22,20 +22,7 @@
#define IO_SPACE_LIMIT 0xffffffff
#define __io(a) __iop13xx_io(a)
-#define __mem_pci(a) (a)
-#define __mem_isa(a) (a)
extern void __iomem * __iop13xx_io(unsigned long io_addr);
-extern void __iomem *__iop13xx_ioremap(unsigned long cookie, size_t size,
- unsigned int mtype);
-extern void __iop13xx_iounmap(void __iomem *addr);
-
-extern u32 iop13xx_atue_mem_base;
-extern u32 iop13xx_atux_mem_base;
-extern size_t iop13xx_atue_mem_size;
-extern size_t iop13xx_atux_mem_size;
-
-#define __arch_ioremap __iop13xx_ioremap
-#define __arch_iounmap __iop13xx_iounmap
#endif
diff --git a/arch/arm/mach-iop13xx/include/mach/iop13xx.h b/arch/arm/mach-iop13xx/include/mach/iop13xx.h
index 07e9ff7adafb..e190dcd7d72d 100644
--- a/arch/arm/mach-iop13xx/include/mach/iop13xx.h
+++ b/arch/arm/mach-iop13xx/include/mach/iop13xx.h
@@ -5,6 +5,7 @@
/* The ATU offsets can change based on the strapping */
extern u32 iop13xx_atux_pmmr_offset;
extern u32 iop13xx_atue_pmmr_offset;
+void iop13xx_init_early(void);
void iop13xx_init_irq(void);
void iop13xx_map_io(void);
void iop13xx_platform_init(void);
diff --git a/arch/arm/mach-iop13xx/io.c b/arch/arm/mach-iop13xx/io.c
index 48642e66c566..3c364198db9c 100644
--- a/arch/arm/mach-iop13xx/io.c
+++ b/arch/arm/mach-iop13xx/io.c
@@ -21,6 +21,8 @@
#include <linux/io.h>
#include <mach/hardware.h>
+#include "pci.h"
+
void * __iomem __iop13xx_io(unsigned long io_addr)
{
void __iomem * io_virt;
@@ -40,8 +42,8 @@ void * __iomem __iop13xx_io(unsigned long io_addr)
}
EXPORT_SYMBOL(__iop13xx_io);
-void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size,
- unsigned int mtype)
+static void __iomem *__iop13xx_ioremap_caller(unsigned long cookie,
+ size_t size, unsigned int mtype, void *caller)
{
void __iomem * retval;
@@ -76,17 +78,14 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size,
break;
default:
retval = __arm_ioremap_caller(cookie, size, mtype,
- __builtin_return_address(0));
+ caller);
}
return retval;
}
-EXPORT_SYMBOL(__iop13xx_ioremap);
-void __iop13xx_iounmap(void __iomem *addr)
+static void __iop13xx_iounmap(volatile void __iomem *addr)
{
- extern void __iounmap(volatile void __iomem *addr);
-
if (iop13xx_atue_mem_base)
if (addr >= (void __iomem *) iop13xx_atue_mem_base &&
addr < (void __iomem *) (iop13xx_atue_mem_base +
@@ -110,4 +109,9 @@ void __iop13xx_iounmap(void __iomem *addr)
skip:
return;
}
-EXPORT_SYMBOL(__iop13xx_iounmap);
+
+void __init iop13xx_init_early(void)
+{
+ arch_ioremap_caller = __iop13xx_ioremap_caller;
+ arch_iounmap = __iop13xx_iounmap;
+}
diff --git a/arch/arm/mach-iop13xx/iq81340mc.c b/arch/arm/mach-iop13xx/iq81340mc.c
index abaee8833588..5c96b73e6964 100644
--- a/arch/arm/mach-iop13xx/iq81340mc.c
+++ b/arch/arm/mach-iop13xx/iq81340mc.c
@@ -92,6 +92,7 @@ static struct sys_timer iq81340mc_timer = {
MACHINE_START(IQ81340MC, "Intel IQ81340MC")
/* Maintainer: Dan Williams <dan.j.williams@intel.com> */
.atag_offset = 0x100,
+ .init_early = iop13xx_init_early,
.map_io = iop13xx_map_io,
.init_irq = iop13xx_init_irq,
.timer = &iq81340mc_timer,
diff --git a/arch/arm/mach-iop13xx/iq81340sc.c b/arch/arm/mach-iop13xx/iq81340sc.c
index 690916a09dc6..aa4dd750135a 100644
--- a/arch/arm/mach-iop13xx/iq81340sc.c
+++ b/arch/arm/mach-iop13xx/iq81340sc.c
@@ -94,6 +94,7 @@ static struct sys_timer iq81340sc_timer = {
MACHINE_START(IQ81340SC, "Intel IQ81340SC")
/* Maintainer: Dan Williams <dan.j.williams@intel.com> */
.atag_offset = 0x100,
+ .init_early = iop13xx_init_early,
.map_io = iop13xx_map_io,
.init_irq = iop13xx_init_irq,
.timer = &iq81340sc_timer,
diff --git a/arch/arm/mach-iop13xx/pci.h b/arch/arm/mach-iop13xx/pci.h
new file mode 100644
index 000000000000..c70cf5b41e31
--- /dev/null
+++ b/arch/arm/mach-iop13xx/pci.h
@@ -0,0 +1,6 @@
+#include <linux/types.h>
+
+extern u32 iop13xx_atue_mem_base;
+extern u32 iop13xx_atux_mem_base;
+extern size_t iop13xx_atue_mem_size;
+extern size_t iop13xx_atux_mem_size;
diff --git a/arch/arm/mach-iop32x/include/mach/io.h b/arch/arm/mach-iop32x/include/mach/io.h
index 2d88264b9863..e2ada265bb8d 100644
--- a/arch/arm/mach-iop32x/include/mach/io.h
+++ b/arch/arm/mach-iop32x/include/mach/io.h
@@ -15,6 +15,5 @@
#define IO_SPACE_LIMIT 0xffffffff
#define __io(p) ((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p))
-#define __mem_pci(a) (a)
#endif
diff --git a/arch/arm/mach-iop33x/include/mach/io.h b/arch/arm/mach-iop33x/include/mach/io.h
index a8a66fc8fbdb..f7c1b6595660 100644
--- a/arch/arm/mach-iop33x/include/mach/io.h
+++ b/arch/arm/mach-iop33x/include/mach/io.h
@@ -15,6 +15,5 @@
#define IO_SPACE_LIMIT 0xffffffff
#define __io(p) ((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p))
-#define __mem_pci(a) (a)
#endif
diff --git a/arch/arm/mach-ixp2000/include/mach/io.h b/arch/arm/mach-ixp2000/include/mach/io.h
index 859e584914d9..f6552d6f35ab 100644
--- a/arch/arm/mach-ixp2000/include/mach/io.h
+++ b/arch/arm/mach-ixp2000/include/mach/io.h
@@ -18,7 +18,6 @@
#include <mach/hardware.h>
#define IO_SPACE_LIMIT 0xffffffff
-#define __mem_pci(a) (a)
/*
* The A? revisions of the IXP2000s assert byte lanes for PCI I/O
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c
index d2c2dc35cbdd..d34542425990 100644
--- a/arch/arm/mach-ixp23xx/core.c
+++ b/arch/arm/mach-ixp23xx/core.c
@@ -36,6 +36,7 @@
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
+#include <asm/system_misc.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
diff --git a/arch/arm/mach-ixp23xx/include/mach/io.h b/arch/arm/mach-ixp23xx/include/mach/io.h
index 4ce4353b9f72..a7aceb55c130 100644
--- a/arch/arm/mach-ixp23xx/include/mach/io.h
+++ b/arch/arm/mach-ixp23xx/include/mach/io.h
@@ -18,6 +18,5 @@
#define IO_SPACE_LIMIT 0xffffffff
#define __io(p) ((void __iomem*)((p) + IXP23XX_PCI_IO_VIRT))
-#define __mem_pci(a) (a)
#endif
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index a7277ad470a5..90e42e9982cb 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -165,6 +165,7 @@ static void __init avila_init(void)
MACHINE_START(AVILA, "Gateworks Avila Network Platform")
/* Maintainer: Deepak Saxena <dsaxena@plexity.net> */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
@@ -184,6 +185,7 @@ MACHINE_END
MACHINE_START(LOFT, "Giant Shoulder Inc Loft board")
/* Maintainer: Tom Billman <kernel@giantshoulderinc.com> */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index a6329a0a8ec4..ebbd7fc90eb4 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -31,11 +31,13 @@
#include <mach/udc.h>
#include <mach/hardware.h>
+#include <mach/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <asm/sched_clock.h>
+#include <asm/system_misc.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
@@ -517,3 +519,35 @@ void ixp4xx_restart(char mode, const char *cmd)
*IXP4XX_OSWE = IXP4XX_WDT_RESET_ENABLE | IXP4XX_WDT_COUNT_ENABLE;
}
}
+
+#ifdef CONFIG_IXP4XX_INDIRECT_PCI
+/*
+ * In the case of using indirect PCI, we simply return the actual PCI
+ * address and our read/write implementation uses that to drive the
+ * access registers. If something outside of PCI is ioremap'd, we
+ * fall back to the default.
+ */
+
+static void __iomem *ixp4xx_ioremap_caller(unsigned long addr, size_t size,
+ unsigned int mtype, void *caller)
+{
+ if (!is_pci_memory(addr))
+ return __arm_ioremap_caller(addr, size, mtype, caller);
+
+ return (void __iomem *)addr;
+}
+
+static void ixp4xx_iounmap(void __iomem *addr)
+{
+ if (!is_pci_memory((__force u32)addr))
+ __iounmap(addr);
+}
+
+void __init ixp4xx_init_early(void)
+{
+ arch_ioremap_caller = ixp4xx_ioremap_caller;
+ arch_iounmap = ixp4xx_iounmap;
+}
+#else
+void __init ixp4xx_init_early(void) {}
+#endif
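
The hunk above is one instance of a pattern repeated throughout this series:
the compile-time __arch_ioremap/__arch_iounmap macros are dropped from the
mach/io.h headers, and platforms that need special ioremap behaviour now
install the arch_ioremap_caller and arch_iounmap hooks at run time from an
init_early callback. A condensed, hypothetical sketch of that pattern follows;
the platform name "foo" and its address-test helpers are invented for
illustration, while the hooks and fallbacks are the ones used in the diff
itself.

#include <linux/types.h>
#include <linux/init.h>
#include <linux/io.h>

/* invented helpers standing in for a platform's static-mapping tests */
static bool foo_phys_is_static(unsigned long phys);
static bool foo_virt_is_static(volatile void __iomem *addr);
static void __iomem *foo_static_virt(unsigned long phys);

static void __iomem *foo_ioremap_caller(unsigned long phys, size_t size,
					unsigned int mtype, void *caller)
{
	/* special-case regions the platform maps statically ... */
	if (foo_phys_is_static(phys))
		return foo_static_virt(phys);

	/* ... and fall back to the generic implementation otherwise */
	return __arm_ioremap_caller(phys, size, mtype, caller);
}

static void foo_iounmap(volatile void __iomem *addr)
{
	/* only unmap what really came from the generic implementation */
	if (!foo_virt_is_static(addr))
		__iounmap(addr);
}

void __init foo_init_early(void)
{
	arch_ioremap_caller = foo_ioremap_caller;
	arch_iounmap = foo_iounmap;
}

Board files then only need to point .init_early at such a function in their
MACHINE_START block, exactly as the ixp4xx and iop13xx board files in this
series do.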
diff --git a/arch/arm/mach-ixp4xx/coyote-setup.c b/arch/arm/mach-ixp4xx/coyote-setup.c
index a74f86ce8bcc..1b83110028d6 100644
--- a/arch/arm/mach-ixp4xx/coyote-setup.c
+++ b/arch/arm/mach-ixp4xx/coyote-setup.c
@@ -110,6 +110,7 @@ static void __init coyote_init(void)
MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote")
/* Maintainer: MontaVista Software, Inc. */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
@@ -129,6 +130,7 @@ MACHINE_END
MACHINE_START(IXDPG425, "Intel IXDPG425")
/* Maintainer: MontaVista Software, Inc. */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index 67be177b336a..97a0af8f1955 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -280,6 +280,7 @@ MACHINE_START(DSMG600, "D-Link DSM-G600 RevA")
/* Maintainer: www.nslu2-linux.org */
.atag_offset = 0x100,
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &dsmg600_timer,
.init_machine = dsmg600_init,
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index 6d5818285af8..9175a25a7511 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -270,6 +270,7 @@ static void __init fsg_init(void)
MACHINE_START(FSG, "Freecom FSG-3")
/* Maintainer: www.nslu2-linux.org */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/gateway7001-setup.c b/arch/arm/mach-ixp4xx/gateway7001-setup.c
index 7ecf9b28f1c0..033c71758953 100644
--- a/arch/arm/mach-ixp4xx/gateway7001-setup.c
+++ b/arch/arm/mach-ixp4xx/gateway7001-setup.c
@@ -97,6 +97,7 @@ static void __init gateway7001_init(void)
MACHINE_START(GATEWAY7001, "Gateway 7001 AP")
/* Maintainer: Imre Kaloz <kaloz@openwrt.org> */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c
index 78ae12c46261..46bb924962ee 100644
--- a/arch/arm/mach-ixp4xx/goramo_mlr.c
+++ b/arch/arm/mach-ixp4xx/goramo_mlr.c
@@ -496,6 +496,7 @@ subsys_initcall(gmlr_pci_init);
MACHINE_START(GORAMO_MLR, "MultiLink")
/* Maintainer: Krzysztof Halasa */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
index a23f89391458..18ebc6be7969 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
@@ -165,6 +165,7 @@ static void __init gtwx5715_init(void)
MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)")
/* Maintainer: George Joseph */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/include/mach/hardware.h b/arch/arm/mach-ixp4xx/include/mach/hardware.h
index c30e7e923a73..034bb2a1b805 100644
--- a/arch/arm/mach-ixp4xx/include/mach/hardware.h
+++ b/arch/arm/mach-ixp4xx/include/mach/hardware.h
@@ -23,8 +23,6 @@
#define PCIBIOS_MAX_MEM 0x4BFFFFFF
#endif
-#define ARCH_HAS_DMA_SET_COHERENT_MASK
-
/* Register locations and bits */
#include "ixp4xx-regs.h"
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index ffb9d6afb89f..5cf30d1b78d2 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -39,11 +39,7 @@ extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
* but in some cases the performance hit is acceptable. In addition, you
* cannot mmap() PCI devices in this case.
*/
-#ifndef CONFIG_IXP4XX_INDIRECT_PCI
-
-#define __mem_pci(a) (a)
-
-#else
+#ifdef CONFIG_IXP4XX_INDIRECT_PCI
/*
* In the case of using indirect PCI, we simply return the actual PCI
@@ -57,24 +53,6 @@ static inline int is_pci_memory(u32 addr)
return (addr >= PCIBIOS_MIN_MEM) && (addr <= 0x4FFFFFFF);
}
-static inline void __iomem * __indirect_ioremap(unsigned long addr, size_t size,
- unsigned int mtype)
-{
- if (!is_pci_memory(addr))
- return __arm_ioremap(addr, size, mtype);
-
- return (void __iomem *)addr;
-}
-
-static inline void __indirect_iounmap(void __iomem *addr)
-{
- if (!is_pci_memory((__force u32)addr))
- __iounmap(addr);
-}
-
-#define __arch_ioremap __indirect_ioremap
-#define __arch_iounmap __indirect_iounmap
-
#define writeb(v, p) __indirect_writeb(v, p)
#define writew(v, p) __indirect_writew(v, p)
#define writel(v, p) __indirect_writel(v, p)
diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h
index df9250bbf13d..b66bedc64de1 100644
--- a/arch/arm/mach-ixp4xx/include/mach/platform.h
+++ b/arch/arm/mach-ixp4xx/include/mach/platform.h
@@ -121,6 +121,7 @@ extern unsigned long ixp4xx_timer_freq;
* Functions used by platform-level setup code
*/
extern void ixp4xx_map_io(void);
+extern void ixp4xx_init_early(void);
extern void ixp4xx_init_irq(void);
extern void ixp4xx_sys_init(void);
extern void ixp4xx_timer_init(void);
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 8a38b39999f8..3d742aee1773 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -254,6 +254,7 @@ static void __init ixdp425_init(void)
MACHINE_START(IXDP425, "Intel IXDP425 Development Platform")
/* Maintainer: MontaVista Software, Inc. */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
@@ -269,6 +270,7 @@ MACHINE_END
MACHINE_START(IXDP465, "Intel IXDP465 Development Platform")
/* Maintainer: MontaVista Software, Inc. */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
@@ -283,6 +285,7 @@ MACHINE_END
MACHINE_START(IXCDP1100, "Intel IXCDP1100 Development Platform")
/* Maintainer: MontaVista Software, Inc. */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
@@ -297,6 +300,7 @@ MACHINE_END
MACHINE_START(KIXRP435, "Intel KIXRP435 Reference Platform")
/* Maintainer: MontaVista Software, Inc. */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index 1010eb7b0083..33cb0955b6bf 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -315,6 +315,7 @@ MACHINE_START(NAS100D, "Iomega NAS 100d")
/* Maintainer: www.nslu2-linux.org */
.atag_offset = 0x100,
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.init_machine = nas100d_init,
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index aa355c360d57..e2903faaebb3 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -301,6 +301,7 @@ MACHINE_START(NSLU2, "Linksys NSLU2")
/* Maintainer: www.nslu2-linux.org */
.atag_offset = 0x100,
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &nslu2_timer,
.init_machine = nslu2_init,
diff --git a/arch/arm/mach-ixp4xx/omixp-setup.c b/arch/arm/mach-ixp4xx/omixp-setup.c
index 0940869fcfdd..158ddb79821d 100644
--- a/arch/arm/mach-ixp4xx/omixp-setup.c
+++ b/arch/arm/mach-ixp4xx/omixp-setup.c
@@ -243,6 +243,7 @@ static void __init omixp_init(void)
MACHINE_START(DEVIXP, "Omicron DEVIXP")
.atag_offset = 0x100,
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.init_machine = omixp_init,
@@ -254,6 +255,7 @@ MACHINE_END
MACHINE_START(MICCPT, "Omicron MICCPT")
.atag_offset = 0x100,
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.init_machine = omixp_init,
@@ -268,6 +270,7 @@ MACHINE_END
MACHINE_START(MIC256, "Omicron MIC256")
.atag_offset = 0x100,
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.init_machine = omixp_init,
diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c
index 9dec20683291..2798f435aaf4 100644
--- a/arch/arm/mach-ixp4xx/vulcan-setup.c
+++ b/arch/arm/mach-ixp4xx/vulcan-setup.c
@@ -237,6 +237,7 @@ static void __init vulcan_init(void)
MACHINE_START(ARCOM_VULCAN, "Arcom/Eurotech Vulcan")
/* Maintainer: Marc Zyngier <maz@misterjones.org> */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/wg302v2-setup.c b/arch/arm/mach-ixp4xx/wg302v2-setup.c
index 5ac0f0a0fd8c..a785175b115b 100644
--- a/arch/arm/mach-ixp4xx/wg302v2-setup.c
+++ b/arch/arm/mach-ixp4xx/wg302v2-setup.c
@@ -98,6 +98,7 @@ static void __init wg302v2_init(void)
MACHINE_START(WG302V2, "Netgear WG302 v2 / WAG302 v2")
/* Maintainer: Imre Kaloz <kaloz@openwrt.org> */
.map_io = ixp4xx_map_io,
+ .init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
diff --git a/arch/arm/mach-kirkwood/cpuidle.c b/arch/arm/mach-kirkwood/cpuidle.c
index 7088180b018b..0f1710941878 100644
--- a/arch/arm/mach-kirkwood/cpuidle.c
+++ b/arch/arm/mach-kirkwood/cpuidle.c
@@ -20,77 +20,47 @@
#include <linux/io.h>
#include <linux/export.h>
#include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
#include <mach/kirkwood.h>
#define KIRKWOOD_MAX_STATES 2
-static struct cpuidle_driver kirkwood_idle_driver = {
- .name = "kirkwood_idle",
- .owner = THIS_MODULE,
-};
-
-static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
-
/* Actual code that puts the SoC in different idle states */
static int kirkwood_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- struct timeval before, after;
- int idle_time;
-
- local_irq_disable();
- do_gettimeofday(&before);
- if (index == 0)
- /* Wait for interrupt state */
- cpu_do_idle();
- else if (index == 1) {
- /*
- * Following write will put DDR in self refresh.
- * Note that we have 256 cycles before DDR puts it
- * self in self-refresh, so the wait-for-interrupt
- * call afterwards won't get the DDR from self refresh
- * mode.
- */
- writel(0x7, DDR_OPERATION_BASE);
- cpu_do_idle();
- }
- do_gettimeofday(&after);
- local_irq_enable();
- idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
- (after.tv_usec - before.tv_usec);
-
- /* Update last residency */
- dev->last_residency = idle_time;
+ writel(0x7, DDR_OPERATION_BASE);
+ cpu_do_idle();
return index;
}
+static struct cpuidle_driver kirkwood_idle_driver = {
+ .name = "kirkwood_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = kirkwood_enter_idle,
+ .exit_latency = 10,
+ .target_residency = 100000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "DDR SR",
+ .desc = "WFI and DDR Self Refresh",
+ },
+ .state_count = KIRKWOOD_MAX_STATES,
+};
+
+static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
+
/* Initialize CPU idle by registering the idle states */
static int kirkwood_init_cpuidle(void)
{
struct cpuidle_device *device;
- struct cpuidle_driver *driver = &kirkwood_idle_driver;
device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
device->state_count = KIRKWOOD_MAX_STATES;
- driver->state_count = KIRKWOOD_MAX_STATES;
-
- /* Wait for interrupt state */
- driver->states[0].enter = kirkwood_enter_idle;
- driver->states[0].exit_latency = 1;
- driver->states[0].target_residency = 10000;
- driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
- strcpy(driver->states[0].name, "WFI");
- strcpy(driver->states[0].desc, "Wait for interrupt");
-
- /* Wait for interrupt and DDR self refresh state */
- driver->states[1].enter = kirkwood_enter_idle;
- driver->states[1].exit_latency = 10;
- driver->states[1].target_residency = 10000;
- driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
- strcpy(driver->states[1].name, "DDR SR");
- strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
cpuidle_register_driver(&kirkwood_idle_driver);
if (cpuidle_register_device(device)) {
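
The kirkwood conversion above moves the driver to the declarative cpuidle
style: with en_core_tk_irqen set, the core disables interrupts and accounts
idle residency itself, state 0 comes from the common ARM_CPUIDLE_WFI_STATE
definition, and the platform callback is reduced to the state entry proper.
Under that convention an ->enter callback needs no timing or IRQ bookkeeping
of its own; a minimal illustrative shape, with hypothetical names:

#include <linux/cpuidle.h>
#include <asm/proc-fns.h>

static int foo_enter_lowpower(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	/* platform-specific preparation for the deeper state goes here */
	cpu_do_idle();		/* wait for interrupt */
	return index;		/* tell the core which state was entered */
}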
diff --git a/arch/arm/mach-kirkwood/include/mach/io.h b/arch/arm/mach-kirkwood/include/mach/io.h
index 49dd0cb5e166..5d0ab61700d2 100644
--- a/arch/arm/mach-kirkwood/include/mach/io.h
+++ b/arch/arm/mach-kirkwood/include/mach/io.h
@@ -20,7 +20,5 @@ static inline void __iomem *__io(unsigned long addr)
}
#define __io(a) __io(a)
-#define __mem_pci(a) (a)
-
#endif
diff --git a/arch/arm/mach-ks8695/include/mach/io.h b/arch/arm/mach-ks8695/include/mach/io.h
deleted file mode 100644
index a7a63ac3ba4e..000000000000
--- a/arch/arm/mach-ks8695/include/mach/io.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-ks8695/include/mach/io.h
- *
- * Copyright (C) 2006 Andrew Victor
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_IO_H
-#define __ASM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-lpc32xx/clock.c b/arch/arm/mach-lpc32xx/clock.c
index b7ef51119d37..2fc24ca12054 100644
--- a/arch/arm/mach-lpc32xx/clock.c
+++ b/arch/arm/mach-lpc32xx/clock.c
@@ -1134,7 +1134,7 @@ static struct clk_lookup lookups[] = {
_REGISTER_CLOCK(NULL, "i2s1_ck", clk_i2s1)
_REGISTER_CLOCK("ts-lpc32xx", NULL, clk_tsc)
_REGISTER_CLOCK("dev:mmc0", NULL, clk_mmc)
- _REGISTER_CLOCK("lpc-net.0", NULL, clk_net)
+ _REGISTER_CLOCK("lpc-eth.0", NULL, clk_net)
_REGISTER_CLOCK("dev:clcd", NULL, clk_lcd)
_REGISTER_CLOCK("lpc32xx_udc", "ck_usbd", clk_usbd)
_REGISTER_CLOCK("lpc32xx_rtc", NULL, clk_rtc)
diff --git a/arch/arm/mach-lpc32xx/include/mach/io.h b/arch/arm/mach-lpc32xx/include/mach/io.h
deleted file mode 100644
index 9b59ab5cef89..000000000000
--- a/arch/arm/mach-lpc32xx/include/mach/io.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * arch/arm/mach-lpc32xx/include/mach/io.h
- *
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c
index 3588a5584153..bf5d8e195c3e 100644
--- a/arch/arm/mach-mmp/aspenite.c
+++ b/arch/arm/mach-mmp/aspenite.c
@@ -23,6 +23,7 @@
#include <mach/addr-map.h>
#include <mach/mfp-pxa168.h>
#include <mach/pxa168.h>
+#include <mach/irqs.h>
#include <video/pxa168fb.h>
#include <linux/input.h>
#include <plat/pxa27x_keypad.h>
@@ -239,7 +240,7 @@ static void __init common_init(void)
MACHINE_START(ASPENITE, "PXA168-based Aspenite Development Platform")
.map_io = mmp_map_io,
- .nr_irqs = IRQ_BOARD_START,
+ .nr_irqs = MMP_NR_IRQS,
.init_irq = pxa168_init_irq,
.timer = &pxa168_timer,
.init_machine = common_init,
@@ -248,7 +249,7 @@ MACHINE_END
MACHINE_START(ZYLONITE2, "PXA168-based Zylonite2 Development Platform")
.map_io = mmp_map_io,
- .nr_irqs = IRQ_BOARD_START,
+ .nr_irqs = MMP_NR_IRQS,
.init_irq = pxa168_init_irq,
.timer = &pxa168_timer,
.init_machine = common_init,
diff --git a/arch/arm/mach-mmp/avengers_lite.c b/arch/arm/mach-mmp/avengers_lite.c
index b148a9dc5a44..603542ae6fbd 100644
--- a/arch/arm/mach-mmp/avengers_lite.c
+++ b/arch/arm/mach-mmp/avengers_lite.c
@@ -43,6 +43,7 @@ static void __init avengers_lite_init(void)
MACHINE_START(AVENGERS_LITE, "PXA168 Avengers lite Development Platform")
.map_io = mmp_map_io,
+ .nr_irqs = MMP_NR_IRQS,
.init_irq = pxa168_init_irq,
.timer = &pxa168_timer,
.init_machine = avengers_lite_init,
diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c
index d839fe6421e6..5cb769cd26d9 100644
--- a/arch/arm/mach-mmp/brownstone.c
+++ b/arch/arm/mach-mmp/brownstone.c
@@ -28,7 +28,7 @@
#include "common.h"
-#define BROWNSTONE_NR_IRQS (IRQ_BOARD_START + 40)
+#define BROWNSTONE_NR_IRQS (MMP_NR_IRQS + 40)
#define GPIO_5V_ENABLE (89)
@@ -158,7 +158,7 @@ static struct platform_device brownstone_v_5vp_device = {
};
static struct max8925_platform_data brownstone_max8925_info = {
- .irq_base = IRQ_BOARD_START,
+ .irq_base = MMP_NR_IRQS,
};
static struct i2c_board_info brownstone_twsi1_info[] = {
diff --git a/arch/arm/mach-mmp/flint.c b/arch/arm/mach-mmp/flint.c
index 2ee8cd7829dd..8059cc0905c6 100644
--- a/arch/arm/mach-mmp/flint.c
+++ b/arch/arm/mach-mmp/flint.c
@@ -23,10 +23,11 @@
#include <mach/addr-map.h>
#include <mach/mfp-mmp2.h>
#include <mach/mmp2.h>
+#include <mach/irqs.h>
#include "common.h"
-#define FLINT_NR_IRQS (IRQ_BOARD_START + 48)
+#define FLINT_NR_IRQS (MMP_NR_IRQS + 48)
static unsigned long flint_pin_config[] __initdata = {
/* UART1 */
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index 87765467de63..f516e74ce0d5 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -191,7 +191,7 @@ static void __init gplugd_init(void)
MACHINE_START(GPLUGD, "PXA168-based GuruPlug Display (gplugD) Platform")
.map_io = mmp_map_io,
- .nr_irqs = IRQ_BOARD_START,
+ .nr_irqs = MMP_NR_IRQS,
.init_irq = pxa168_init_irq,
.timer = &pxa168_timer,
.init_machine = gplugd_init,
diff --git a/arch/arm/mach-mmp/include/mach/addr-map.h b/arch/arm/mach-mmp/include/mach/addr-map.h
index 3e404acd6ff4..b1ece08174e8 100644
--- a/arch/arm/mach-mmp/include/mach/addr-map.h
+++ b/arch/arm/mach-mmp/include/mach/addr-map.h
@@ -11,12 +11,6 @@
#ifndef __ASM_MACH_ADDR_MAP_H
#define __ASM_MACH_ADDR_MAP_H
-#ifndef __ASSEMBLER__
-#define IOMEM(x) ((void __iomem *)(x))
-#else
-#define IOMEM(x) (x)
-#endif
-
/* APB - Application Subsystem Peripheral Bus
*
* NOTE: the DMA controller registers are actually on the AXI fabric #1
diff --git a/arch/arm/mach-mmp/include/mach/io.h b/arch/arm/mach-mmp/include/mach/io.h
deleted file mode 100644
index e7adf3d012c1..000000000000
--- a/arch/arm/mach-mmp/include/mach/io.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * linux/arch/arm/mach-mmp/include/mach/io.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_MACH_IO_H
-#define __ASM_MACH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
- */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif /* __ASM_MACH_IO_H */
diff --git a/arch/arm/mach-mmp/include/mach/irqs.h b/arch/arm/mach-mmp/include/mach/irqs.h
index 34635a0bbb59..d0e746626a3d 100644
--- a/arch/arm/mach-mmp/include/mach/irqs.h
+++ b/arch/arm/mach-mmp/include/mach/irqs.h
@@ -223,7 +223,6 @@
#define MMP_GPIO_TO_IRQ(gpio) (IRQ_GPIO_START + (gpio))
#define IRQ_BOARD_START (IRQ_GPIO_START + MMP_NR_BUILTIN_GPIO)
-
-#define NR_IRQS (IRQ_BOARD_START)
+#define MMP_NR_IRQS IRQ_BOARD_START
#endif /* __ASM_MACH_IRQS_H */
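With the fixed NR_IRQS definition gone from mach/irqs.h, each MMP board now sizes its own interrupt space on top of MMP_NR_IRQS, as the surrounding board hunks show. A small sketch of that arithmetic, assuming the max8925 header already used by the Brownstone and Jasper boards in the adjacent hunks; the extra count of 48 is illustrative, not a real board's number.

/* Illustrative board fragment; the PMIC choice and the extra 48 IRQs
 * are examples only. */
#include <linux/mfd/max8925.h>
#include <mach/irqs.h>

#define EXAMPLE_BOARD_NR_IRQS	(MMP_NR_IRQS + 48)

static struct max8925_platform_data example_pmic_info = {
	/* board-level IRQs start right after the SoC + GPIO range */
	.irq_base	= MMP_NR_IRQS,
};

/* The machine descriptor then advertises the total with
 * .nr_irqs = EXAMPLE_BOARD_NR_IRQS, as the boards in this series do. */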
diff --git a/arch/arm/mach-mmp/irq-mmp2.c b/arch/arm/mach-mmp/irq-mmp2.c
index d21c5441a3d0..7895d277421e 100644
--- a/arch/arm/mach-mmp/irq-mmp2.c
+++ b/arch/arm/mach-mmp/irq-mmp2.c
@@ -15,6 +15,7 @@
#include <linux/irq.h>
#include <linux/io.h>
+#include <mach/irqs.h>
#include <mach/regs-icu.h>
#include <mach/mmp2.h>
diff --git a/arch/arm/mach-mmp/jasper.c b/arch/arm/mach-mmp/jasper.c
index 96cf5c8fe47d..ff73249884d0 100644
--- a/arch/arm/mach-mmp/jasper.c
+++ b/arch/arm/mach-mmp/jasper.c
@@ -19,6 +19,7 @@
#include <linux/mfd/max8925.h>
#include <linux/interrupt.h>
+#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/addr-map.h>
@@ -27,7 +28,7 @@
#include "common.h"
-#define JASPER_NR_IRQS (IRQ_BOARD_START + 48)
+#define JASPER_NR_IRQS (MMP_NR_IRQS + 48)
static unsigned long jasper_pin_config[] __initdata = {
/* UART1 */
@@ -135,7 +136,7 @@ static struct max8925_power_pdata jasper_power_data = {
static struct max8925_platform_data jasper_max8925_info = {
.backlight = &jasper_backlight_data,
.power = &jasper_power_data,
- .irq_base = IRQ_BOARD_START,
+ .irq_base = MMP_NR_IRQS,
};
static struct i2c_board_info jasper_twsi1_info[] = {
diff --git a/arch/arm/mach-mmp/tavorevb.c b/arch/arm/mach-mmp/tavorevb.c
index bc97170125bf..b28f9084dfff 100644
--- a/arch/arm/mach-mmp/tavorevb.c
+++ b/arch/arm/mach-mmp/tavorevb.c
@@ -101,6 +101,7 @@ static void __init tavorevb_init(void)
MACHINE_START(TAVOREVB, "PXA910 Evaluation Board (aka TavorEVB)")
.map_io = mmp_map_io,
+ .nr_irqs = MMP_NR_IRQS,
.init_irq = pxa910_init_irq,
.timer = &pxa910_timer,
.init_machine = tavorevb_init,
diff --git a/arch/arm/mach-mmp/teton_bga.c b/arch/arm/mach-mmp/teton_bga.c
index 0523e422990e..42bef6674ecf 100644
--- a/arch/arm/mach-mmp/teton_bga.c
+++ b/arch/arm/mach-mmp/teton_bga.c
@@ -26,6 +26,7 @@
#include <mach/mfp-pxa168.h>
#include <mach/pxa168.h>
#include <mach/teton_bga.h>
+#include <mach/irqs.h>
#include "common.h"
@@ -83,7 +84,7 @@ static void __init teton_bga_init(void)
MACHINE_START(TETON_BGA, "PXA168-based Teton BGA Development Platform")
.map_io = mmp_map_io,
- .nr_irqs = IRQ_BOARD_START,
+ .nr_irqs = MMP_NR_IRQS,
.init_irq = pxa168_init_irq,
.timer = &pxa168_timer,
.init_machine = teton_bga_init,
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index e72c709da44f..3fc9ed21f97d 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -38,7 +38,7 @@
* 16 board interrupts -- PCA9575 GPIO expander
* 24 board interrupts -- 88PM860x PMIC
*/
-#define TTCDKB_NR_IRQS (IRQ_BOARD_START + 16 + 16 + 24)
+#define TTCDKB_NR_IRQS (MMP_NR_IRQS + 16 + 16 + 24)
static unsigned long ttc_dkb_pin_config[] __initdata = {
/* UART2 */
@@ -131,7 +131,7 @@ static struct platform_device *ttc_dkb_devices[] = {
static struct pca953x_platform_data max7312_data[] = {
{
.gpio_base = TTCDKB_GPIO_EXT0(0),
- .irq_base = IRQ_BOARD_START,
+ .irq_base = MMP_NR_IRQS,
},
};
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index a60ab6d04ec5..3698a370d636 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -68,6 +68,11 @@ static struct platform_device *devices[] __initdata = {
extern struct sys_timer msm_timer;
+static void __init halibut_init_early(void)
+{
+ arch_ioremap_caller = __msm_ioremap_caller;
+}
+
static void __init halibut_init_irq(void)
{
msm_init_irq();
@@ -96,6 +101,7 @@ MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
.atag_offset = 0x100,
.fixup = halibut_fixup,
.map_io = halibut_map_io,
+ .init_early = halibut_init_early,
.init_irq = halibut_init_irq,
.init_machine = halibut_init,
.timer = &msm_timer,
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
index 6b9b227c87c5..5414f76ec0a9 100644
--- a/arch/arm/mach-msm/board-trout.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -43,6 +43,11 @@ static struct platform_device *devices[] __initdata = {
extern struct sys_timer msm_timer;
+static void __init trout_init_early(void)
+{
+ arch_ioremap_caller = __msm_ioremap_caller;
+}
+
static void __init trout_init_irq(void)
{
msm_init_irq();
@@ -96,6 +101,7 @@ MACHINE_START(TROUT, "HTC Dream")
.atag_offset = 0x100,
.fixup = trout_fixup,
.map_io = trout_map_io,
+ .init_early = trout_init_early,
.init_irq = trout_init_irq,
.init_machine = trout_init,
.timer = &msm_timer,
diff --git a/arch/arm/mach-msm/include/mach/io.h b/arch/arm/mach-msm/include/mach/io.h
deleted file mode 100644
index dc1b928745e9..000000000000
--- a/arch/arm/mach-msm/include/mach/io.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* arch/arm/mach-msm/include/mach/io.h
- *
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __arch_ioremap __msm_ioremap
-#define __arch_iounmap __iounmap
-
-void __iomem *__msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype);
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-void msm_map_qsd8x50_io(void);
-void msm_map_msm7x30_io(void);
-void msm_map_msm8x60_io(void);
-void msm_map_msm8960_io(void);
-
-extern unsigned int msm_shared_ram_phys;
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
index 8af46123dab6..6c4046c21296 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
@@ -38,12 +38,6 @@
*
*/
-#ifdef __ASSEMBLY__
-#define IOMEM(x) x
-#else
-#define IOMEM(x) ((void __force __iomem *)(x))
-#endif
-
#define MSM_VIC_BASE IOMEM(0xE0000000)
#define MSM_VIC_PHYS 0xC0000000
#define MSM_VIC_SIZE SZ_4K
@@ -111,5 +105,11 @@
#define MSM_AD5_PHYS 0xAC000000
#define MSM_AD5_SIZE (SZ_1M*13)
+#ifndef __ASSEMBLY__
+
+extern void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller);
+
+#endif
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
index 198202c267c8..f944fe65a657 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
@@ -100,4 +100,8 @@
#define MSM_HSUSB_PHYS 0xA3600000
#define MSM_HSUSB_SIZE SZ_1K
+#ifndef __ASSEMBLY__
+extern void msm_map_msm7x30_io(void);
+#endif
+
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8960.h b/arch/arm/mach-msm/include/mach/msm_iomap-8960.h
index 800b55767e6b..a1752c0284fc 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8960.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8960.h
@@ -50,4 +50,8 @@
#define MSM_DEBUG_UART_PHYS 0x16440000
#endif
+#ifndef __ASSEMBLY__
+extern void msm_map_msm8960_io(void);
+#endif
+
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
index 0faa894729b7..da77cc1d545d 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
@@ -122,4 +122,8 @@
#define MSM_SDC4_PHYS 0xA0600000
#define MSM_SDC4_SIZE SZ_4K
+#ifndef __ASSEMBLY__
+extern void msm_map_qsd8x50_io(void);
+#endif
+
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
index 54e12caa8d86..5aed57dc808c 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
@@ -67,4 +67,8 @@
#define MSM_DEBUG_UART_PHYS 0x19C40000
#endif
+#ifndef __ASSEMBLY__
+extern void msm_map_msm8x60_io(void);
+#endif
+
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 90682f4599d3..00afdfb8c38f 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -37,12 +37,6 @@
*
*/
-#ifdef __ASSEMBLY__
-#define IOMEM(x) x
-#else
-#define IOMEM(x) ((void __force __iomem *)(x))
-#endif
-
#if defined(CONFIG_ARCH_MSM7X30)
#include "msm_iomap-7x30.h"
#elif defined(CONFIG_ARCH_QSD8X50)
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 578b04e42deb..a1e7b1168850 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -172,8 +172,8 @@ void __init msm_map_msm7x30_io(void)
}
#endif /* CONFIG_ARCH_MSM7X30 */
-void __iomem *
-__msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
if (mtype == MT_DEVICE) {
/* The peripherals in the 88000000 - D0000000 range
@@ -184,7 +184,5 @@ __msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
mtype = MT_DEVICE_NONSHARED;
}
- return __arm_ioremap_caller(phys_addr, size, mtype,
- __builtin_return_address(0));
+ return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}
-EXPORT_SYMBOL(__msm_ioremap);
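The io.c change above stops exporting __msm_ioremap and provides __msm_ioremap_caller() instead, which the Halibut and Trout boards plug into arch_ioremap_caller from init_early so vmalloc accounting still records the real caller. A sketch of that hook pattern, assuming the arch_ioremap_caller / __arm_ioremap_caller interfaces that appear in these hunks; the address window checked below is made up.

/* Sketch of a machine-specific ioremap hook; the 0x88000000 window
 * is only an example of forcing a different memory type. */
#include <linux/init.h>
#include <linux/io.h>
#include <asm/io.h>			/* arch_ioremap_caller */
#include <asm/mach/map.h>		/* MT_DEVICE* */

static void __iomem *example_ioremap_caller(unsigned long phys_addr,
					    size_t size, unsigned int mtype,
					    void *caller)
{
	if (mtype == MT_DEVICE && phys_addr >= 0x88000000 &&
	    phys_addr < 0xd0000000)
		mtype = MT_DEVICE_NONSHARED;

	/* pass the original caller on so /proc/vmallocinfo stays useful */
	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}

/* referenced from the machine descriptor's .init_early */
static void __init example_init_early(void)
{
	/* install the hook before any driver gets to ioremap() */
	arch_ioremap_caller = example_ioremap_caller;
}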
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 75f4be40b3e5..812808254936 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -24,6 +24,7 @@
#include <asm/mach/time.h>
#include <asm/hardware/gic.h>
#include <asm/localtimer.h>
+#include <asm/sched_clock.h>
#include <mach/msm_iomap.h>
#include <mach/cpu.h>
@@ -105,12 +106,12 @@ static union {
static void __iomem *source_base;
-static cycle_t msm_read_timer_count(struct clocksource *cs)
+static notrace cycle_t msm_read_timer_count(struct clocksource *cs)
{
return readl_relaxed(source_base + TIMER_COUNT_VAL);
}
-static cycle_t msm_read_timer_count_shift(struct clocksource *cs)
+static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs)
{
/*
* Shift timer count down by a constant due to unreliable lower bits
@@ -166,6 +167,11 @@ static struct local_timer_ops msm_local_timer_ops __cpuinitdata = {
};
#endif /* CONFIG_LOCAL_TIMERS */
+static notrace u32 msm_sched_clock_read(void)
+{
+ return msm_clocksource.read(&msm_clocksource);
+}
+
static void __init msm_timer_init(void)
{
struct clock_event_device *ce = &msm_clockevent;
@@ -232,6 +238,8 @@ err:
res = clocksource_register_hz(cs, dgt_hz);
if (res)
pr_err("clocksource_register failed\n");
+ setup_sched_clock(msm_sched_clock_read,
+ cpu_is_msm7x01() ? 32 - MSM_DGT_SHIFT : 32, dgt_hz);
}
struct sys_timer msm_timer = {
diff --git a/arch/arm/mach-mv78xx0/include/mach/io.h b/arch/arm/mach-mv78xx0/include/mach/io.h
index 450e0e1ad092..c7d9d00d8fc1 100644
--- a/arch/arm/mach-mv78xx0/include/mach/io.h
+++ b/arch/arm/mach-mv78xx0/include/mach/io.h
@@ -20,7 +20,5 @@ static inline void __iomem *__io(unsigned long addr)
}
#define __io(a) __io(a)
-#define __mem_pci(a) (a)
-
#endif
diff --git a/arch/arm/mach-mxs/include/mach/hardware.h b/arch/arm/mach-mxs/include/mach/hardware.h
index 53e89a09bf0d..4c0e8a64d8c7 100644
--- a/arch/arm/mach-mxs/include/mach/hardware.h
+++ b/arch/arm/mach-mxs/include/mach/hardware.h
@@ -20,10 +20,4 @@
#ifndef __MACH_MXS_HARDWARE_H__
#define __MACH_MXS_HARDWARE_H__
-#ifdef __ASSEMBLER__
-#define IOMEM(addr) (addr)
-#else
-#define IOMEM(addr) ((void __force __iomem *)(addr))
-#endif
-
#endif /* __MACH_MXS_HARDWARE_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/io.h b/arch/arm/mach-mxs/include/mach/io.h
deleted file mode 100644
index 289b7227e072..000000000000
--- a/arch/arm/mach-mxs/include/mach/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MACH_MXS_IO_H__
-#define __MACH_MXS_IO_H__
-
-/* Allow IO space to be anywhere in the memory */
-#define IO_SPACE_LIMIT 0xffffffff
-
-/* io address mapping macro */
-#define __io(a) __typesafe_io(a)
-
-#define __mem_pci(a) (a)
-
-#endif /* __MACH_MXS_IO_H__ */
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 59e67979f197..aa627465d914 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -168,7 +168,7 @@ void __init netx_init_irq(void)
{
int irq;
- vic_init(__io(io_p2v(NETX_PA_VIC)), 0, ~0, 0);
+ vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0);
for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
irq_set_chip_and_handler(irq, &netx_hif_chip,
diff --git a/arch/arm/mach-netx/include/mach/hardware.h b/arch/arm/mach-netx/include/mach/hardware.h
index 517a2bd37842..b661af2f2145 100644
--- a/arch/arm/mach-netx/include/mach/hardware.h
+++ b/arch/arm/mach-netx/include/mach/hardware.h
@@ -33,7 +33,7 @@
#define XMAC_MEM_SIZE 0x1000
#define SRAM_MEM_SIZE 0x8000
-#define io_p2v(x) ((x) - NETX_IO_PHYS + NETX_IO_VIRT)
+#define io_p2v(x) IOMEM((x) - NETX_IO_PHYS + NETX_IO_VIRT)
#define io_v2p(x) ((x) - NETX_IO_VIRT + NETX_IO_PHYS)
#endif
diff --git a/arch/arm/mach-netx/include/mach/io.h b/arch/arm/mach-netx/include/mach/io.h
deleted file mode 100644
index c3921cb3b6a6..000000000000
--- a/arch/arm/mach-netx/include/mach/io.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * arch/arm/mach-netx/include/mach/io.h
- *
- * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-netx/include/mach/netx-regs.h b/arch/arm/mach-netx/include/mach/netx-regs.h
index 5a03e7ccb01a..fdde22b58ac3 100644
--- a/arch/arm/mach-netx/include/mach/netx-regs.h
+++ b/arch/arm/mach-netx/include/mach/netx-regs.h
@@ -115,7 +115,7 @@
*********************************/
/* Registers */
-#define NETX_SYSTEM_REG(ofs) __io(NETX_VA_SYSTEM + (ofs))
+#define NETX_SYSTEM_REG(ofs) IOMEM(NETX_VA_SYSTEM + (ofs))
#define NETX_SYSTEM_BOO_SR NETX_SYSTEM_REG(0x00)
#define NETX_SYSTEM_IOC_CR NETX_SYSTEM_REG(0x04)
#define NETX_SYSTEM_IOC_MR NETX_SYSTEM_REG(0x08)
@@ -185,7 +185,7 @@
*******************************/
/* Registers */
-#define NETX_GPIO_REG(ofs) __io(NETX_VA_GPIO + (ofs))
+#define NETX_GPIO_REG(ofs) IOMEM(NETX_VA_GPIO + (ofs))
#define NETX_GPIO_CFG(gpio) NETX_GPIO_REG(0x0 + ((gpio)<<2))
#define NETX_GPIO_THRESHOLD_CAPTURE(gpio) NETX_GPIO_REG(0x40 + ((gpio)<<2))
#define NETX_GPIO_COUNTER_CTRL(counter) NETX_GPIO_REG(0x80 + ((counter)<<2))
@@ -230,7 +230,7 @@
*******************************/
/* Registers */
-#define NETX_PIO_REG(ofs) __io(NETX_VA_PIO + (ofs))
+#define NETX_PIO_REG(ofs) IOMEM(NETX_VA_PIO + (ofs))
#define NETX_PIO_INPIO NETX_PIO_REG(0x0)
#define NETX_PIO_OUTPIO NETX_PIO_REG(0x4)
#define NETX_PIO_OEPIO NETX_PIO_REG(0x8)
@@ -240,7 +240,7 @@
*******************************/
/* Registers */
-#define NETX_MIIMU __io(NETX_VA_MIIMU)
+#define NETX_MIIMU IOMEM(NETX_VA_MIIMU)
/* Bits */
#define MIIMU_SNRDY (1<<0)
@@ -317,7 +317,7 @@
*******************************/
/* Registers */
-#define NETX_PFIFO_REG(ofs) __io(NETX_VA_PFIFO + (ofs))
+#define NETX_PFIFO_REG(ofs) IOMEM(NETX_VA_PFIFO + (ofs))
#define NETX_PFIFO_BASE(pfifo) NETX_PFIFO_REG(0x00 + ((pfifo)<<2))
#define NETX_PFIFO_BORDER_BASE(pfifo) NETX_PFIFO_REG(0x80 + ((pfifo)<<2))
#define NETX_PFIFO_RESET NETX_PFIFO_REG(0x100)
@@ -334,7 +334,7 @@
*******************************/
/* Registers */
-#define NETX_MEMCR_REG(ofs) __io(NETX_VA_MEMCR + (ofs))
+#define NETX_MEMCR_REG(ofs) IOMEM(NETX_VA_MEMCR + (ofs))
#define NETX_MEMCR_SRAM_CTRL(cs) NETX_MEMCR_REG(0x0 + 4 * (cs)) /* SRAM for CS 0..2 */
#define NETX_MEMCR_SDRAM_CFG_CTRL NETX_MEMCR_REG(0x40)
#define NETX_MEMCR_SDRAM_TIMING_CTRL NETX_MEMCR_REG(0x44)
@@ -355,7 +355,7 @@
*******************************/
/* Registers */
-#define NETX_DPMAS_REG(ofs) __io(NETX_VA_DPMAS + (ofs))
+#define NETX_DPMAS_REG(ofs) IOMEM(NETX_VA_DPMAS + (ofs))
#define NETX_DPMAS_SYS_STAT NETX_DPMAS_REG(0x4d8)
#define NETX_DPMAS_INT_STAT NETX_DPMAS_REG(0x4e0)
#define NETX_DPMAS_INT_EN NETX_DPMAS_REG(0x4f0)
@@ -425,7 +425,7 @@
/*******************************
* I2C *
*******************************/
-#define NETX_I2C_REG(ofs) __io(NETX_VA_I2C, (ofs))
+#define NETX_I2C_REG(ofs) IOMEM(NETX_VA_I2C, (ofs))
#define NETX_I2C_CTRL NETX_I2C_REG(0x0)
#define NETX_I2C_DATA NETX_I2C_REG(0x4)
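The netX hunks above switch the register macros from __io() to IOMEM() so they evaluate to void __iomem * and can be handed straight to readl()/writel(). A short illustration of the same idiom, assuming the common IOMEM() definition in asm/io.h that this series consolidates; the virtual base address, offset and bit are invented.

/* Illustration only; the virtual base, offset and bit are made up. */
#include <linux/io.h>		/* readl/writel, pulls in asm/io.h's IOMEM() */

#define EXAMPLE_REG(ofs)	IOMEM(0xf0000000 + (ofs))
#define EXAMPLE_CTRL		EXAMPLE_REG(0x04)

static void example_enable_block(void)
{
	/* no casts needed: EXAMPLE_CTRL already has __iomem type */
	writel(readl(EXAMPLE_CTRL) | (1 << 0), EXAMPLE_CTRL);
}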
diff --git a/arch/arm/mach-nomadik/include/mach/io.h b/arch/arm/mach-nomadik/include/mach/io.h
deleted file mode 100644
index 2e1eca1b8243..000000000000
--- a/arch/arm/mach-nomadik/include/mach/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * arch/arm/mach-nomadik/include/mach/io.h (copied from mach-sa1100)
- *
- * Copyright (C) 1997-1999 Russell King
- *
- * Modifications:
- * 06-12-1997 RMK Created.
- * 07-04-1999 RMK Major cleanup
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
- */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index 399c4c49722f..a051cb8ae57f 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -14,6 +14,7 @@
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <plat/board-ams-delta.h>
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index c3068622fdcb..553a2e535764 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -245,8 +245,6 @@ static struct resource h2_smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = OMAP_GPIO_IRQ(0),
- .end = OMAP_GPIO_IRQ(0),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
},
};
@@ -359,11 +357,9 @@ static struct tps65010_board tps_board = {
static struct i2c_board_info __initdata h2_i2c_board_info[] = {
{
I2C_BOARD_INFO("tps65010", 0x48),
- .irq = OMAP_GPIO_IRQ(58),
.platform_data = &tps_board,
}, {
I2C_BOARD_INFO("isp1301_omap", 0x2d),
- .irq = OMAP_GPIO_IRQ(2),
},
};
@@ -428,8 +424,12 @@ static void __init h2_init(void)
omap_cfg_reg(E19_1610_KBR4);
omap_cfg_reg(N19_1610_KBR5);
+ h2_smc91x_resources[1].start = gpio_to_irq(0);
+ h2_smc91x_resources[1].end = gpio_to_irq(0);
platform_add_devices(h2_devices, ARRAY_SIZE(h2_devices));
omap_serial_init();
+ h2_i2c_board_info[0].irq = gpio_to_irq(58);
+ h2_i2c_board_info[1].irq = gpio_to_irq(2);
omap_register_i2c_bus(1, 100, h2_i2c_board_info,
ARRAY_SIZE(h2_i2c_board_info));
omap1_usb_init(&h2_usb_config);
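board-h2.c above shows the conversion applied throughout the OMAP board files that follow: the compile-time OMAP_GPIO_IRQ() initialisers are dropped and the IRQ numbers are filled in at init time with gpio_to_irq(), which only yields a valid number once the GPIO driver has set up its IRQ range. A stripped-down sketch of the pattern; the GPIO number and the resource layout are illustrative.

/* Sketch of the runtime IRQ fill-in; GPIO 0 and the device are examples. */
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource example_eth_resources[] = {
	[0] = {
		.flags	= IORESOURCE_MEM,	/* base/size set elsewhere */
	},
	[1] = {
		/* IRQ number intentionally left unset at build time */
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
	},
};

static void __init example_board_init(void)
{
	/* gpio_to_irq() is only meaningful after the GPIO setup has run */
	example_eth_resources[1].start = gpio_to_irq(0);
	example_eth_resources[1].end   = gpio_to_irq(0);
	/* ... followed by platform_add_devices(), as in the boards here */
}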
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 64b8584f64ce..4c19f4c06851 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -247,8 +247,6 @@ static struct resource smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = OMAP_GPIO_IRQ(40),
- .end = OMAP_GPIO_IRQ(40),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
},
};
@@ -338,7 +336,6 @@ static struct spi_board_info h3_spi_board_info[] __initdata = {
.modalias = "tsc2101",
.bus_num = 2,
.chip_select = 0,
- .irq = OMAP_GPIO_IRQ(H3_TS_GPIO),
.max_speed_hz = 16000000,
/* .platform_data = &tsc_platform_data, */
},
@@ -374,11 +371,9 @@ static struct omap_lcd_config h3_lcd_config __initdata = {
static struct i2c_board_info __initdata h3_i2c_board_info[] = {
{
I2C_BOARD_INFO("tps65013", 0x48),
- /* .irq = OMAP_GPIO_IRQ(??), */
},
{
I2C_BOARD_INFO("isp1301_omap", 0x2d),
- .irq = OMAP_GPIO_IRQ(14),
},
};
@@ -420,10 +415,14 @@ static void __init h3_init(void)
omap_cfg_reg(E19_1610_KBR4);
omap_cfg_reg(N19_1610_KBR5);
+ smc91x_resources[1].start = gpio_to_irq(40);
+ smc91x_resources[1].end = gpio_to_irq(40);
platform_add_devices(devices, ARRAY_SIZE(devices));
+ h3_spi_board_info[0].irq = gpio_to_irq(H3_TS_GPIO);
spi_register_board_info(h3_spi_board_info,
ARRAY_SIZE(h3_spi_board_info));
omap_serial_init();
+ h3_i2c_board_info[1].irq = gpio_to_irq(14);
omap_register_i2c_bus(1, 100, h3_i2c_board_info,
ARRAY_SIZE(h3_i2c_board_info));
omap1_usb_init(&h3_usb_config);
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 827d83a96af8..60c06ee23855 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -324,8 +324,6 @@ static struct platform_device gpio_leds_device = {
static struct resource htcpld_resources[] = {
[0] = {
- .start = OMAP_GPIO_IRQ(HTCHERALD_GIRQ_BTNS),
- .end = OMAP_GPIO_IRQ(HTCHERALD_GIRQ_BTNS),
.flags = IORESOURCE_IRQ,
},
};
@@ -450,7 +448,6 @@ static struct spi_board_info __initdata htcherald_spi_board_info[] = {
{
.modalias = "ads7846",
.platform_data = &htcherald_ts_platform_data,
- .irq = OMAP_GPIO_IRQ(HTCHERALD_GPIO_TS),
.max_speed_hz = 2500000,
.bus_num = 2,
.chip_select = 1,
@@ -576,6 +573,8 @@ static void __init htcherald_init(void)
printk(KERN_INFO "HTC Herald init.\n");
/* Do board initialization before we register all the devices */
+ htcpld_resources[0].start = gpio_to_irq(HTCHERALD_GIRQ_BTNS);
+ htcpld_resources[0].end = gpio_to_irq(HTCHERALD_GIRQ_BTNS);
platform_add_devices(devices, ARRAY_SIZE(devices));
htcherald_disable_watchdog();
@@ -583,6 +582,7 @@ static void __init htcherald_init(void)
htcherald_usb_enable();
omap1_usb_init(&htcherald_usb_config);
+ htcherald_spi_board_info[0].irq = gpio_to_irq(HTCHERALD_GPIO_TS);
spi_register_board_info(htcherald_spi_board_info,
ARRAY_SIZE(htcherald_spi_board_info));
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index 61219182d16a..67d7fd57a692 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -248,8 +248,6 @@ static struct resource innovator1610_smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = OMAP_GPIO_IRQ(0),
- .end = OMAP_GPIO_IRQ(0),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
},
};
@@ -409,6 +407,8 @@ static void __init innovator_init(void)
#endif
#ifdef CONFIG_ARCH_OMAP16XX
if (!cpu_is_omap1510()) {
+ innovator1610_smc91x_resources[1].start = gpio_to_irq(0);
+ innovator1610_smc91x_resources[1].end = gpio_to_irq(0);
platform_add_devices(innovator1610_devices, ARRAY_SIZE(innovator1610_devices));
}
#endif
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index fe95ec5f6f03..d21dcc2fbc5a 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -147,7 +147,6 @@ static struct spi_board_info nokia770_spi_board_info[] __initdata = {
.bus_num = 2,
.chip_select = 0,
.max_speed_hz = 2500000,
- .irq = OMAP_GPIO_IRQ(15),
.platform_data = &nokia770_ads7846_platform_data,
},
};
@@ -237,6 +236,7 @@ static void __init omap_nokia770_init(void)
omap_writew((omap_readw(0xfffb5004) & ~2), 0xfffb5004);
platform_add_devices(nokia770_devices, ARRAY_SIZE(nokia770_devices));
+ nokia770_spi_board_info[1].irq = gpio_to_irq(15);
spi_register_board_info(nokia770_spi_board_info,
ARRAY_SIZE(nokia770_spi_board_info));
omap_serial_init();
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 1fe347396f4d..a5f85dda3f69 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -129,8 +129,6 @@ static struct resource osk5912_smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = OMAP_GPIO_IRQ(0),
- .end = OMAP_GPIO_IRQ(0),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
},
};
@@ -147,8 +145,6 @@ static struct platform_device osk5912_smc91x_device = {
static struct resource osk5912_cf_resources[] = {
[0] = {
- .start = OMAP_GPIO_IRQ(62),
- .end = OMAP_GPIO_IRQ(62),
.flags = IORESOURCE_IRQ,
},
};
@@ -240,7 +236,6 @@ static struct tps65010_board tps_board = {
static struct i2c_board_info __initdata osk_i2c_board_info[] = {
{
I2C_BOARD_INFO("tps65010", 0x48),
- .irq = OMAP_GPIO_IRQ(OMAP_MPUIO(1)),
.platform_data = &tps_board,
},
@@ -408,7 +403,6 @@ static struct spi_board_info __initdata mistral_boardinfo[] = { {
/* MicroWire (bus 2) CS0 has an ads7846e */
.modalias = "ads7846",
.platform_data = &mistral_ts_info,
- .irq = OMAP_GPIO_IRQ(4),
.max_speed_hz = 120000 /* max sample rate at 3V */
* 26 /* command + data + overhead */,
.bus_num = 2,
@@ -471,6 +465,7 @@ static void __init osk_mistral_init(void)
gpio_direction_input(4);
irq_set_irq_type(gpio_to_irq(4), IRQ_TYPE_EDGE_FALLING);
+ mistral_boardinfo[0].irq = gpio_to_irq(4);
spi_register_board_info(mistral_boardinfo,
ARRAY_SIZE(mistral_boardinfo));
@@ -542,6 +537,10 @@ static void __init osk_init(void)
osk_flash_resource.end = osk_flash_resource.start = omap_cs3_phys();
osk_flash_resource.end += SZ_32M - 1;
+ osk5912_smc91x_resources[1].start = gpio_to_irq(0);
+ osk5912_smc91x_resources[1].end = gpio_to_irq(0);
+ osk5912_cf_resources[0].start = gpio_to_irq(62);
+ osk5912_cf_resources[0].end = gpio_to_irq(62);
platform_add_devices(osk5912_devices, ARRAY_SIZE(osk5912_devices));
l = omap_readl(USB_TRANSCEIVER_CTRL);
@@ -556,6 +555,7 @@ static void __init osk_init(void)
gpio_direction_input(OMAP_MPUIO(1));
omap_serial_init();
+ osk_i2c_board_info[0].irq = gpio_to_irq(OMAP_MPUIO(1));
omap_register_i2c_bus(1, 400, osk_i2c_board_info,
ARRAY_SIZE(osk_i2c_board_info));
osk_mistral_init();
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index 0863d8e2bdf1..a60e6c22f816 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -217,7 +217,6 @@ static struct spi_board_info palmte_spi_info[] __initdata = {
.modalias = "tsc2102",
.bus_num = 2, /* uWire (officially) */
.chip_select = 0, /* As opposed to 3 */
- .irq = OMAP_GPIO_IRQ(PALMTE_PINTDAV_GPIO),
.max_speed_hz = 8000000,
},
};
@@ -251,6 +250,7 @@ static void __init omap_palmte_init(void)
platform_add_devices(palmte_devices, ARRAY_SIZE(palmte_devices));
+ palmte_spi_info[0].irq = gpio_to_irq(PALMTE_PINTDAV_GPIO);
spi_register_board_info(palmte_spi_info, ARRAY_SIZE(palmte_spi_info));
palmte_misc_gpio_setup();
omap_serial_init();
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index 4ff699c509c0..8d854878547b 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -257,7 +257,6 @@ static struct spi_board_info __initdata palmtt_boardinfo[] = {
/* MicroWire (bus 2) CS0 has an ads7846e */
.modalias = "ads7846",
.platform_data = &palmtt_ts_info,
- .irq = OMAP_GPIO_IRQ(6),
.max_speed_hz = 120000 /* max sample rate at 3V */
* 26 /* command + data + overhead */,
.bus_num = 2,
@@ -298,6 +297,7 @@ static void __init omap_palmtt_init(void)
platform_add_devices(palmtt_devices, ARRAY_SIZE(palmtt_devices));
+ palmtt_boardinfo[0].irq = gpio_to_irq(6);
spi_register_board_info(palmtt_boardinfo,ARRAY_SIZE(palmtt_boardinfo));
omap_serial_init();
omap1_usb_init(&palmtt_usb_config);
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index abcbbd339aeb..a2c5abcd7c84 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -224,7 +224,6 @@ static struct spi_board_info __initdata palmz71_boardinfo[] = { {
/* MicroWire (bus 2) CS0 has an ads7846e */
.modalias = "ads7846",
.platform_data = &palmz71_ts_info,
- .irq = OMAP_GPIO_IRQ(PALMZ71_PENIRQ_GPIO),
.max_speed_hz = 120000 /* max sample rate at 3V */
* 26 /* command + data + overhead */,
.bus_num = 2,
@@ -313,6 +312,7 @@ omap_palmz71_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
+ palmz71_boardinfo[0].irq = gpio_to_irq(PALMZ71_PENIRQ_GPIO);
spi_register_board_info(palmz71_boardinfo,
ARRAY_SIZE(palmz71_boardinfo));
omap1_usb_init(&palmz71_usb_config);
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 659d0f75de2c..37232d04233f 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -44,7 +44,6 @@
static struct plat_serial8250_port voiceblue_ports[] = {
{
.mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x40000),
- .irq = OMAP_GPIO_IRQ(12),
.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
.iotype = UPIO_MEM,
.regshift = 1,
@@ -52,7 +51,6 @@ static struct plat_serial8250_port voiceblue_ports[] = {
},
{
.mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x50000),
- .irq = OMAP_GPIO_IRQ(13),
.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
.iotype = UPIO_MEM,
.regshift = 1,
@@ -60,7 +58,6 @@ static struct plat_serial8250_port voiceblue_ports[] = {
},
{
.mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x60000),
- .irq = OMAP_GPIO_IRQ(14),
.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
.iotype = UPIO_MEM,
.regshift = 1,
@@ -68,7 +65,6 @@ static struct plat_serial8250_port voiceblue_ports[] = {
},
{
.mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x70000),
- .irq = OMAP_GPIO_IRQ(15),
.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
.iotype = UPIO_MEM,
.regshift = 1,
@@ -80,9 +76,6 @@ static struct plat_serial8250_port voiceblue_ports[] = {
static struct platform_device serial_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM1,
- .dev = {
- .platform_data = voiceblue_ports,
- },
};
static int __init ext_uart_init(void)
@@ -90,6 +83,11 @@ static int __init ext_uart_init(void)
if (!machine_is_voiceblue())
return -ENODEV;
+ voiceblue_ports[0].irq = gpio_to_irq(12);
+ voiceblue_ports[1].irq = gpio_to_irq(13);
+ voiceblue_ports[2].irq = gpio_to_irq(14);
+ voiceblue_ports[3].irq = gpio_to_irq(15);
+ serial_device.dev.platform_data = voiceblue_ports;
return platform_device_register(&serial_device);
}
arch_initcall(ext_uart_init);
@@ -128,8 +126,6 @@ static struct resource voiceblue_smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = OMAP_GPIO_IRQ(8),
- .end = OMAP_GPIO_IRQ(8),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
},
};
@@ -275,6 +271,8 @@ static void __init voiceblue_init(void)
irq_set_irq_type(gpio_to_irq(14), IRQ_TYPE_EDGE_RISING);
irq_set_irq_type(gpio_to_irq(15), IRQ_TYPE_EDGE_RISING);
+ voiceblue_smc91x_resources[1].start = gpio_to_irq(8);
+ voiceblue_smc91x_resources[1].end = gpio_to_irq(8);
platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices));
omap_board_config = voiceblue_config;
omap_board_config_size = ARRAY_SIZE(voiceblue_config);
diff --git a/arch/arm/mach-omap1/flash.c b/arch/arm/mach-omap1/flash.c
index f9bf78d4fdfb..401eb3c080c2 100644
--- a/arch/arm/mach-omap1/flash.c
+++ b/arch/arm/mach-omap1/flash.c
@@ -17,20 +17,12 @@
void omap1_set_vpp(struct platform_device *pdev, int enable)
{
- static int count;
u32 l;
- if (enable) {
- if (count++ == 0) {
- l = omap_readl(EMIFS_CONFIG);
- l |= OMAP_EMIFS_CONFIG_WP;
- omap_writel(l, EMIFS_CONFIG);
- }
- } else {
- if (count && (--count == 0)) {
- l = omap_readl(EMIFS_CONFIG);
- l &= ~OMAP_EMIFS_CONFIG_WP;
- omap_writel(l, EMIFS_CONFIG);
- }
- }
+ l = omap_readl(EMIFS_CONFIG);
+ if (enable)
+ l |= OMAP_EMIFS_CONFIG_WP;
+ else
+ l &= ~OMAP_EMIFS_CONFIG_WP;
+ omap_writel(l, EMIFS_CONFIG);
}
diff --git a/arch/arm/mach-omap1/include/mach/entry-macro.S b/arch/arm/mach-omap1/include/mach/entry-macro.S
index fa0f32a686aa..88f08cab1717 100644
--- a/arch/arm/mach-omap1/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap1/include/mach/entry-macro.S
@@ -11,7 +11,6 @@
*/
#include <mach/hardware.h>
-#include <mach/io.h>
#include <mach/irqs.h>
#include "../../iomap.h"
diff --git a/arch/arm/mach-omap1/include/mach/io.h b/arch/arm/mach-omap1/include/mach/io.h
deleted file mode 100644
index 37b12e1fd022..000000000000
--- a/arch/arm/mach-omap1/include/mach/io.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * arch/arm/mach-omap1/include/mach/io.h
- *
- * IO definitions for TI OMAP processors and boards
- *
- * Copied from arch/arm/mach-sa1100/include/mach/io.h
- * Copyright (C) 1997-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Modifications:
- * 06-12-1997 RMK Created.
- * 07-04-1999 RMK Major cleanup
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
- */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-omap1/iomap.h b/arch/arm/mach-omap1/iomap.h
index d68175761c3d..330c4716b028 100644
--- a/arch/arm/mach-omap1/iomap.h
+++ b/arch/arm/mach-omap1/iomap.h
@@ -22,12 +22,6 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#ifdef __ASSEMBLER__
-#define IOMEM(x) (x)
-#else
-#define IOMEM(x) ((void __force __iomem *)(x))
-#endif
-
#define OMAP1_IO_OFFSET 0x01000000 /* Virtual IO = 0xfefb0000 */
#define OMAP1_IO_ADDRESS(pa) IOMEM((pa) - OMAP1_IO_OFFSET)
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 306beaca14c5..f66c32912b22 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -44,6 +44,7 @@
#include <linux/io.h>
#include <linux/atomic.h>
+#include <asm/system_misc.h>
#include <asm/irq.h>
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/mach-omap1/sleep.S b/arch/arm/mach-omap1/sleep.S
index 0779db150da7..0e628743bd03 100644
--- a/arch/arm/mach-omap1/sleep.S
+++ b/arch/arm/mach-omap1/sleep.S
@@ -36,8 +36,6 @@
#include <asm/assembler.h>
-#include <mach/io.h>
-
#include "iomap.h"
#include "pm.h"
diff --git a/arch/arm/mach-omap1/sram.S b/arch/arm/mach-omap1/sram.S
index 2ce0b9ab20e5..00e9d9e9adf1 100644
--- a/arch/arm/mach-omap1/sram.S
+++ b/arch/arm/mach-omap1/sram.S
@@ -12,7 +12,6 @@
#include <asm/assembler.h>
-#include <mach/io.h>
#include <mach/hardware.h>
#include "iomap.h"
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index c8bda62900d8..e658f835d0de 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -230,12 +230,12 @@ static struct i2c_board_info __initdata sdp2430_i2c1_boardinfo[] = {
{
I2C_BOARD_INFO("isp1301_omap", 0x2D),
.flags = I2C_CLIENT_WAKE,
- .irq = OMAP_GPIO_IRQ(78),
},
};
static int __init omap2430_i2c_init(void)
{
+ sdp2430_i2c1_boardinfo[0].irq = gpio_to_irq(78);
omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
ARRAY_SIZE(sdp2430_i2c1_boardinfo));
omap_pmic_init(2, 100, "twl4030", INT_24XX_SYS_NIRQ,
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 37dcb1bc025e..a39fc4bbd2b8 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -907,7 +907,6 @@ static void __init omap4_sdp4430_wifi_mux_init(void)
}
static struct wl12xx_platform_data omap4_sdp4430_wlan_data __initdata = {
- .irq = OMAP_GPIO_IRQ(GPIO_WIFI_IRQ),
.board_ref_clock = WL12XX_REFCLOCK_26,
.board_tcxo_clock = WL12XX_TCXOCLOCK_26,
};
@@ -917,6 +916,7 @@ static void __init omap4_sdp4430_wifi_init(void)
int ret;
omap4_sdp4430_wifi_mux_init();
+ omap4_sdp4430_wlan_data.irq = gpio_to_irq(GPIO_WIFI_IRQ);
ret = wl12xx_set_platform_data(&omap4_sdp4430_wlan_data);
if (ret)
pr_err("Error setting wl12xx data: %d\n", ret);
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index ac773829941f..768ece2e9c3b 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -136,8 +136,6 @@ static struct resource apollon_smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = OMAP_GPIO_IRQ(APOLLON_ETHR_GPIO_IRQ),
- .end = OMAP_GPIO_IRQ(APOLLON_ETHR_GPIO_IRQ),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
},
};
@@ -341,6 +339,8 @@ static void __init omap_apollon_init(void)
* You have to mux them off in device drivers later on
* if not needed.
*/
+ apollon_smc91x_resources[1].start = gpio_to_irq(APOLLON_ETHR_GPIO_IRQ);
+ apollon_smc91x_resources[1].end = gpio_to_irq(APOLLON_ETHR_GPIO_IRQ);
platform_add_devices(apollon_devices, ARRAY_SIZE(apollon_devices));
omap_serial_init();
omap_sdrc_init(NULL, NULL);
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 11cd2a806093..a2010f07de31 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -411,7 +411,6 @@ static struct resource omap_dm9000_resources[] = {
.flags = IORESOURCE_MEM,
},
[2] = {
- .start = OMAP_GPIO_IRQ(OMAP_DM9000_GPIO_IRQ),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
},
};
@@ -639,6 +638,7 @@ static void __init devkit8000_init(void)
omap_hsmmc_init(mmc);
devkit8000_i2c_init();
+ omap_dm9000_resources[2].start = gpio_to_irq(OMAP_DM9000_GPIO_IRQ);
platform_add_devices(devkit8000_devices,
ARRAY_SIZE(devkit8000_devices));
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 54af800d143c..0bbbabe28fcc 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -348,7 +348,6 @@ static struct at24_platform_data m24c01 = {
static struct i2c_board_info __initdata h4_i2c_board_info[] = {
{
I2C_BOARD_INFO("isp1301_omap", 0x2d),
- .irq = OMAP_GPIO_IRQ(125),
},
{ /* EEPROM on mainboard */
I2C_BOARD_INFO("24c01", 0x52),
@@ -377,6 +376,7 @@ static void __init omap_h4_init(void)
*/
board_mkp_init();
+ h4_i2c_board_info[0].irq = gpio_to_irq(125);
i2c_register_board_info(1, h4_i2c_board_info,
ARRAY_SIZE(h4_i2c_board_info));
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index a659e198892b..4c90f078abe1 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -487,7 +487,6 @@ static struct platform_device omap3evm_wlan_regulator = {
};
struct wl12xx_platform_data omap3evm_wlan_data __initdata = {
- .irq = OMAP_GPIO_IRQ(OMAP3EVM_WLAN_IRQ_GPIO),
.board_ref_clock = WL12XX_REFCLOCK_38, /* 38.4 MHz */
};
#endif
@@ -623,6 +622,7 @@ static void __init omap3_evm_wl12xx_init(void)
int ret;
/* WL12xx WLAN Init */
+ omap3evm_wlan_data.irq = gpio_to_irq(OMAP3EVM_WLAN_IRQ_GPIO);
ret = wl12xx_set_platform_data(&omap3evm_wlan_data);
if (ret)
pr_err("error setting wl12xx data: %d\n", ret);
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 8bf8e99c358e..d8c0e89f0126 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -231,7 +231,6 @@ static struct platform_device omap_vwlan_device = {
};
struct wl12xx_platform_data omap_panda_wlan_data __initdata = {
- .irq = OMAP_GPIO_IRQ(GPIO_WIFI_IRQ),
/* PANDA ref clock is 38.4 MHz */
.board_ref_clock = 2,
};
@@ -558,6 +557,7 @@ static void __init omap4_panda_init(void)
package = OMAP_PACKAGE_CBL;
omap4_mux_init(board_mux, NULL, package);
+ omap_panda_wlan_data.irq = gpio_to_irq(GPIO_WIFI_IRQ);
ret = wl12xx_set_platform_data(&omap_panda_wlan_data);
if (ret)
pr_err("error setting wl12xx data: %d\n", ret);
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index f120997309af..d87ee0612098 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -170,7 +170,6 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
.modalias = "tsc2005",
.bus_num = 1,
.chip_select = 0,
- .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),
.max_speed_hz = 6000000,
.controller_data = &tsc2005_mcspi_config,
.platform_data = &tsc2005_pdata,
@@ -1129,6 +1128,8 @@ static void __init rx51_init_tsc2005(void)
}
tsc2005_pdata.set_reset = rx51_tsc2005_set_reset;
+ rx51_peripherals_spi_board_info[RX51_SPI_TSC2005].irq =
+ gpio_to_irq(RX51_TSC2005_IRQ_GPIO);
}
void __init rx51_peripherals_init(void)
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c
index 369c2eb7715b..1e8540eabde9 100644
--- a/arch/arm/mach-omap2/board-zoom-debugboard.c
+++ b/arch/arm/mach-omap2/board-zoom-debugboard.c
@@ -43,7 +43,6 @@ static inline void __init zoom_init_smsc911x(void)
static struct plat_serial8250_port serial_platform_data[] = {
{
.mapbase = ZOOM_UART_BASE,
- .irq = OMAP_GPIO_IRQ(102),
.flags = UPF_BOOT_AUTOCONF|UPF_IOREMAP|UPF_SHARE_IRQ,
.irqflags = IRQF_SHARED | IRQF_TRIGGER_RISING,
.iotype = UPIO_MEM,
@@ -89,6 +88,8 @@ static inline void __init zoom_init_quaduart(void)
if (gpio_request_one(quart_gpio, GPIOF_IN, "TL16CP754C GPIO") < 0)
printk(KERN_ERR "Failed to request GPIO%d for TL16CP754C\n",
quart_gpio);
+
+ serial_platform_data[0].irq = gpio_to_irq(102);
}
static inline int omap_zoom_debugboard_detect(void)
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 3d39cdb2e250..b797cb279618 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -193,7 +193,6 @@ static struct platform_device omap_vwlan_device = {
};
static struct wl12xx_platform_data omap_zoom_wlan_data __initdata = {
- .irq = OMAP_GPIO_IRQ(OMAP_ZOOM_WLAN_IRQ_GPIO),
/* ZOOM ref clock is 26 MHz */
.board_ref_clock = 1,
};
@@ -297,7 +296,10 @@ static void enable_board_wakeup_source(void)
void __init zoom_peripherals_init(void)
{
- int ret = wl12xx_set_platform_data(&omap_zoom_wlan_data);
+ int ret;
+
+ omap_zoom_wlan_data.irq = gpio_to_irq(OMAP_ZOOM_WLAN_IRQ_GPIO);
+ ret = wl12xx_set_platform_data(&omap_zoom_wlan_data);
if (ret)
pr_err("error setting wl12xx data: %d\n", ret);
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 981b9f9111a4..480fb8f09aed 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/list.h>
+#include <linux/io.h>
#include <plat/hardware.h>
#include <plat/clkdev_omap.h>
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 79b98f22f207..c03c1108468e 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/clk.h>
+#include <linux/io.h>
#include <plat/hardware.h>
#include <plat/clkdev_omap.h>
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index 9498b0f5fbd0..1706ebcec08d 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -76,7 +76,7 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
}
spi_bi->bus_num = bus_num;
- spi_bi->irq = OMAP_GPIO_IRQ(gpio_pendown);
+ spi_bi->irq = gpio_to_irq(gpio_pendown);
if (board_pdata) {
board_pdata->gpio_pendown = gpio_pendown;
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 464cffde58fe..535866489ce3 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -87,29 +87,14 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
return 0;
}
-/**
- * omap3_enter_idle - Programs OMAP3 to enter the specified state
- * @dev: cpuidle device
- * @drv: cpuidle driver
- * @index: the index of state to be entered
- *
- * Called from the CPUidle framework to program the device to the
- * specified target state selected by the governor.
- */
-static int omap3_enter_idle(struct cpuidle_device *dev,
+static int __omap3_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
struct omap3_idle_statedata *cx =
cpuidle_get_statedata(&dev->states_usage[index]);
- struct timespec ts_preidle, ts_postidle, ts_idle;
u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
- int idle_time;
-
- /* Used to keep track of the total time in idle */
- getnstimeofday(&ts_preidle);
- local_irq_disable();
local_fiq_disable();
pwrdm_set_next_pwrst(mpu_pd, mpu_state);
@@ -148,22 +133,29 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
}
return_sleep_time:
- getnstimeofday(&ts_postidle);
- ts_idle = timespec_sub(ts_postidle, ts_preidle);
- local_irq_enable();
local_fiq_enable();
- idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
- USEC_PER_SEC;
-
- /* Update cpuidle counters */
- dev->last_residency = idle_time;
-
return index;
}
/**
+ * omap3_enter_idle - Programs OMAP3 to enter the specified state
+ * @dev: cpuidle device
+ * @drv: cpuidle driver
+ * @index: the index of state to be entered
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified target state selected by the governor.
+ */
+static inline int omap3_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ return cpuidle_wrap_enter(dev, drv, index, __omap3_enter_idle);
+}
+
+/**
* next_valid_state - Find next valid C-state
* @dev: cpuidle device
* @drv: cpuidle driver
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 72e018b9b260..f386cbe9c889 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -62,15 +62,9 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
{
struct omap4_idle_statedata *cx =
cpuidle_get_statedata(&dev->states_usage[index]);
- struct timespec ts_preidle, ts_postidle, ts_idle;
u32 cpu1_state;
- int idle_time;
int cpu_id = smp_processor_id();
- /* Used to keep track of the total time in idle */
- getnstimeofday(&ts_preidle);
-
- local_irq_disable();
local_fiq_disable();
/*
@@ -128,26 +122,17 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
if (index > 0)
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
- getnstimeofday(&ts_postidle);
- ts_idle = timespec_sub(ts_postidle, ts_preidle);
-
- local_irq_enable();
local_fiq_enable();
- idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
- USEC_PER_SEC;
-
- /* Update cpuidle counters */
- dev->last_residency = idle_time;
-
return index;
}
DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
struct cpuidle_driver omap4_idle_driver = {
- .name = "omap4_idle",
- .owner = THIS_MODULE,
+ .name = "omap4_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
};
static inline void _fill_cstate(struct cpuidle_driver *drv,
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 9706c648bc19..db5a88a36c63 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -99,7 +99,7 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initdata = {
{ "dss_hdmi", "omapdss_hdmi", -1 },
};
-static void omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
+static void __init omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
{
u32 reg;
u16 control_i2c_1;
@@ -125,7 +125,7 @@ static void omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
}
}
-static int __init omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
+static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
{
u32 enable_mask, enable_shift;
u32 pipd_mask, pipd_shift;
@@ -166,7 +166,7 @@ int __init omap_hdmi_init(enum omap_hdmi_flags flags)
return 0;
}
-static int __init omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
+static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
{
if (cpu_is_omap44xx())
return omap4_dsi_mux_pads(dsi_id, lane_mask);
@@ -174,7 +174,7 @@ static int __init omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
return 0;
}
-static void __init omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
+static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
{
if (cpu_is_omap44xx())
omap4_dsi_mux_pads(dsi_id, 0);
diff --git a/arch/arm/mach-omap2/include/mach/io.h b/arch/arm/mach-omap2/include/mach/io.h
deleted file mode 100644
index b8758c8a9394..000000000000
--- a/arch/arm/mach-omap2/include/mach/io.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * arch/arm/mach-omap2/include/mach/io.h
- *
- * IO definitions for TI OMAP processors and boards
- *
- * Copied from arch/arm/mach-sa1100/include/mach/io.h
- * Copyright (C) 1997-1999 Russell King
- *
- * Copyright (C) 2009 Texas Instruments
- * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Modifications:
- * 06-12-1997 RMK Created.
- * 07-04-1999 RMK Major cleanup
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
- */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-omap2/iomap.h b/arch/arm/mach-omap2/iomap.h
index e6f958165296..0812b154f5b5 100644
--- a/arch/arm/mach-omap2/iomap.h
+++ b/arch/arm/mach-omap2/iomap.h
@@ -22,12 +22,6 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#ifdef __ASSEMBLER__
-#define IOMEM(x) (x)
-#else
-#define IOMEM(x) ((void __force __iomem *)(x))
-#endif
-
#define OMAP2_L3_IO_OFFSET 0x90000000
#define OMAP2_L3_IO_ADDRESS(pa) IOMEM((pa) + OMAP2_L3_IO_OFFSET) /* L3 */
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index a7bdec69a2b3..d0c1c9695996 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -17,6 +17,8 @@
#include <linux/export.h>
#include <linux/suspend.h>
+#include <asm/system_misc.h>
+
#include <plat/omap-pm.h>
#include <plat/omap_device.h>
#include "common.h"
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index d2513ac79ff5..2e6454c8d4ba 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -57,5 +57,14 @@ struct meminfo;
struct tag;
extern void __init tag_fixup_mem32(struct tag *, char **, struct meminfo *);
+/*****************************************************************************
+ * Helpers to access Orion registers
+ ****************************************************************************/
+/*
+ * These are not preempt-safe. Locks, if needed, must be taken
+ * care of by the caller.
+ */
+#define orion5x_setbits(r, mask) writel(readl(r) | (mask), (r))
+#define orion5x_clrbits(r, mask) writel(readl(r) & ~(mask), (r))
#endif
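/*
 * Editor's aside, not part of the commit: the orion5x_setbits()/clrbits()
 * helpers moved into common.h above are plain read-modify-write accessors
 * and, as their comment says, not preempt-safe. A minimal sketch of how a
 * caller might serialize them; the lock and function names here are
 * hypothetical, not from the tree.
 */
#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_reg_lock);

static void example_set_flag(void __iomem *reg, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&example_reg_lock, flags);
	orion5x_setbits(reg, mask);	/* readl(reg) | mask, then writel() */
	spin_unlock_irqrestore(&example_reg_lock, flags);
}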
diff --git a/arch/arm/mach-orion5x/include/mach/io.h b/arch/arm/mach-orion5x/include/mach/io.h
deleted file mode 100644
index e9d9afdc2659..000000000000
--- a/arch/arm/mach-orion5x/include/mach/io.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * arch/arm/mach-orion5x/include/mach/io.h
- *
- * Tzachi Perelstein <tzachi@marvell.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_IO_H
-#define __ASM_ARCH_IO_H
-
-#include "orion5x.h"
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-
-/*****************************************************************************
- * Helpers to access Orion registers
- ****************************************************************************/
-/*
- * These are not preempt-safe. Locks, if needed, must be taken
- * care of by the caller.
- */
-#define orion5x_setbits(r, mask) writel(readl(r) | (mask), (r))
-#define orion5x_clrbits(r, mask) writel(readl(r) & ~(mask), (r))
-
-
-#endif
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index d6a91948e4dc..cb19e1661bb3 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -19,6 +19,7 @@
#include <asm/mach/pci.h>
#include <plat/pcie.h>
#include <plat/addr-map.h>
+#include <mach/orion5x.h>
#include "common.h"
/*****************************************************************************
diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c
index c9abb8fbfa70..7189827d641d 100644
--- a/arch/arm/mach-orion5x/tsx09-common.c
+++ b/arch/arm/mach-orion5x/tsx09-common.c
@@ -15,6 +15,7 @@
#include <linux/mv643xx_eth.h>
#include <linux/timex.h>
#include <linux/serial_reg.h>
+#include <mach/orion5x.h>
#include "tsx09-common.h"
#include "common.h"
diff --git a/arch/arm/mach-picoxcell/include/mach/io.h b/arch/arm/mach-picoxcell/include/mach/io.h
deleted file mode 100644
index 7573ec7d10a3..000000000000
--- a/arch/arm/mach-picoxcell/include/mach/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2011 Picochip Ltd., Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-/* No ioports, but needed for driver compatibility. */
-#define __io(a) __typesafe_io(a)
-/* No PCI possible on picoxcell. */
-#define __mem_pci(a) (a)
-
-#endif /* __ASM_ARM_ARCH_IO_H */
diff --git a/arch/arm/mach-picoxcell/include/mach/irqs.h b/arch/arm/mach-picoxcell/include/mach/irqs.h
deleted file mode 100644
index 59eac1ee2820..000000000000
--- a/arch/arm/mach-picoxcell/include/mach/irqs.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2011 Picochip Ltd., Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __MACH_IRQS_H
-#define __MACH_IRQS_H
-
-/* We dynamically allocate our irq_desc's. */
-#define NR_IRQS 0
-
-#endif /* __MACH_IRQS_H */
diff --git a/arch/arm/mach-pnx4008/include/mach/io.h b/arch/arm/mach-pnx4008/include/mach/io.h
deleted file mode 100644
index cbf0904540ea..000000000000
--- a/arch/arm/mach-pnx4008/include/mach/io.h
+++ /dev/null
@@ -1,21 +0,0 @@
-
-/*
- * arch/arm/mach-pnx4008/include/mach/io.h
- *
- * Author: Dmitry Chigirev <chigirev@ru.mvista.com>
- *
- * 2005 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-prima2/include/mach/io.h b/arch/arm/mach-prima2/include/mach/io.h
deleted file mode 100644
index 6c31e9ec279e..000000000000
--- a/arch/arm/mach-prima2/include/mach/io.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * arch/arm/mach-prima2/include/mach/io.h
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- *
- * Licensed under GPLv2 or later.
- */
-
-#ifndef __MACH_PRIMA2_IO_H
-#define __MACH_PRIMA2_IO_H
-
-#define IO_SPACE_LIMIT ((resource_size_t)0)
-
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-prima2/timer.c b/arch/arm/mach-prima2/timer.c
index b7a6091ce791..0d024b1e916d 100644
--- a/arch/arm/mach-prima2/timer.c
+++ b/arch/arm/mach-prima2/timer.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <mach/map.h>
+#include <asm/sched_clock.h>
#include <asm/mach/time.h>
#define SIRFSOC_TIMER_COUNTER_LO 0x0000
@@ -165,21 +166,9 @@ static struct irqaction sirfsoc_timer_irq = {
};
/* Overwrite weak default sched_clock with more precise one */
-unsigned long long notrace sched_clock(void)
+static u32 notrace sirfsoc_read_sched_clock(void)
{
- static int is_mapped;
-
- /*
- * sched_clock is called earlier than .init of sys_timer
- * if we map timer memory in .init of sys_timer, system
- * will panic due to illegal memory access
- */
- if (!is_mapped) {
- sirfsoc_of_timer_map();
- is_mapped = 1;
- }
-
- return sirfsoc_timer_read(NULL) * (NSEC_PER_SEC / CLOCK_TICK_RATE);
+ return (u32)(sirfsoc_timer_read(NULL) & 0xffffffff);
}
static void __init sirfsoc_clockevent_init(void)
@@ -210,6 +199,8 @@ static void __init sirfsoc_timer_init(void)
BUG_ON(rate < CLOCK_TICK_RATE);
BUG_ON(rate % CLOCK_TICK_RATE);
+ sirfsoc_of_timer_map();
+
writel_relaxed(rate / CLOCK_TICK_RATE / 2 - 1, sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
@@ -217,6 +208,8 @@ static void __init sirfsoc_timer_init(void)
BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
+ setup_sched_clock(sirfsoc_read_sched_clock, 32, CLOCK_TICK_RATE);
+
BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
sirfsoc_clockevent_init();
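/*
 * Editor's aside, not part of the commit: the prima2 change above follows the
 * generic ARM pattern of replacing an open-coded sched_clock() with a 32-bit
 * counter read registered through setup_sched_clock(), which then does the
 * nanosecond scaling and wrap handling. A minimal sketch under that
 * assumption; my_counter_base and MY_TIMER_RATE are hypothetical.
 */
#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

#define MY_TIMER_RATE	1000000UL	/* counter ticks per second */

static void __iomem *my_counter_base;	/* mapped before registration */

static u32 notrace my_read_sched_clock(void)
{
	return readl_relaxed(my_counter_base);	/* low 32 bits are enough */
}

static void __init my_sched_clock_init(void)
{
	setup_sched_clock(my_read_sched_clock, 32, MY_TIMER_RATE);
}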
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 61d3c72ded84..109ccd2a8885 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -108,6 +108,7 @@ config CSB726_CSB701
config MACH_ARMCORE
bool "CompuLab CM-X255/CM-X270 modules"
+ select ARCH_HAS_DMA_SET_COHERENT_MASK if PCI
select PXA27x
select IWMMXT
select PXA25x
diff --git a/arch/arm/mach-pxa/capc7117.c b/arch/arm/mach-pxa/capc7117.c
index c91727d1fe09..9a8760b72913 100644
--- a/arch/arm/mach-pxa/capc7117.c
+++ b/arch/arm/mach-pxa/capc7117.c
@@ -150,6 +150,7 @@ MACHINE_START(CAPC7117,
"Embedian CAPC-7117 evaluation kit based on the MXM-8x10 CoM")
.atag_offset = 0x100,
.map_io = pxa3xx_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/clock-pxa2xx.c b/arch/arm/mach-pxa/clock-pxa2xx.c
index 1d5859d9a0e3..9ee2ad6a0a07 100644
--- a/arch/arm/mach-pxa/clock-pxa2xx.c
+++ b/arch/arm/mach-pxa/clock-pxa2xx.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <mach/pxa2xx-regs.h>
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 895ee8c45009..313274016277 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -714,7 +714,6 @@ struct da9030_battery_info cm_x300_battery_info = {
static struct regulator_consumer_supply buck2_consumers[] = {
{
- .dev = NULL,
.supply = "vcc_core",
},
};
@@ -854,6 +853,7 @@ static void __init cm_x300_fixup(struct tag *tags, char **cmdline,
MACHINE_START(CM_X300, "CM-X300 module")
.atag_offset = 0x100,
.map_io = pxa3xx_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c
index 29d5d541f602..b2f227d36125 100644
--- a/arch/arm/mach-pxa/colibri-pxa270.c
+++ b/arch/arm/mach-pxa/colibri-pxa270.c
@@ -310,6 +310,7 @@ MACHINE_START(COLIBRI, "Toradex Colibri PXA270")
.atag_offset = 0x100,
.init_machine = colibri_pxa270_init,
.map_io = pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
@@ -320,6 +321,7 @@ MACHINE_START(INCOME, "Income s.r.o. SH-Dmaster PXA270 SBC")
.atag_offset = 0x100,
.init_machine = colibri_pxa270_income_init,
.map_io = pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c
index 0846d210cb05..bb6def8ec979 100644
--- a/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/arch/arm/mach-pxa/colibri-pxa300.c
@@ -186,6 +186,7 @@ MACHINE_START(COLIBRI300, "Toradex Colibri PXA300")
.atag_offset = 0x100,
.init_machine = colibri_pxa300_init,
.map_io = pxa3xx_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index 6ad3359063af..d88e7b37f1da 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -256,6 +256,7 @@ MACHINE_START(COLIBRI320, "Toradex Colibri PXA320")
.atag_offset = 0x100,
.init_machine = colibri_pxa320_init,
.map_io = pxa3xx_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index de9d45e673fd..c1fe32db4755 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -729,6 +729,7 @@ static void __init fixup_corgi(struct tag *tags, char **cmdline,
MACHINE_START(CORGI, "SHARP Corgi")
.fixup = fixup_corgi,
.map_io = pxa25x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.init_machine = corgi_init,
@@ -741,6 +742,7 @@ MACHINE_END
MACHINE_START(SHEPHERD, "SHARP Shepherd")
.fixup = fixup_corgi,
.map_io = pxa25x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.init_machine = corgi_init,
@@ -753,6 +755,7 @@ MACHINE_END
MACHINE_START(HUSKY, "SHARP Husky")
.fixup = fixup_corgi,
.map_io = pxa25x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.init_machine = corgi_init,
diff --git a/arch/arm/mach-pxa/corgi_pm.c b/arch/arm/mach-pxa/corgi_pm.c
index 39e265cfc86d..048c4299473c 100644
--- a/arch/arm/mach-pxa/corgi_pm.c
+++ b/arch/arm/mach-pxa/corgi_pm.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/apm-emulation.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
index 88fbec05ec50..b85b4ab7aac6 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <mach/pxa3xx-regs.h>
diff --git a/arch/arm/mach-pxa/csb726.c b/arch/arm/mach-pxa/csb726.c
index fb5a51d834e5..67f0de37f46e 100644
--- a/arch/arm/mach-pxa/csb726.c
+++ b/arch/arm/mach-pxa/csb726.c
@@ -274,6 +274,7 @@ static void __init csb726_init(void)
MACHINE_START(CSB726, "Cogent CSB726")
.atag_offset = 0x100,
.map_io = pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.init_machine = csb726_init,
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 84f2d7015cfe..166eee5b8a70 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -12,6 +12,7 @@
#include <mach/pxafb.h>
#include <mach/mmc.h>
#include <mach/irda.h>
+#include <mach/irqs.h>
#include <mach/ohci.h>
#include <plat/pxa27x_keypad.h>
#include <mach/camera.h>
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index d80c0ba9a095..16ec557b8e43 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1083,19 +1083,19 @@ static void __init em_x270_userspace_consumers_init(void)
}
/* DA9030 related initializations */
-#define REGULATOR_CONSUMER(_name, _dev, _supply) \
+#define REGULATOR_CONSUMER(_name, _dev_name, _supply) \
static struct regulator_consumer_supply _name##_consumers[] = { \
{ \
- .dev = _dev, \
+ .dev_name = _dev_name, \
.supply = _supply, \
}, \
}
-REGULATOR_CONSUMER(ldo3, &em_x270_gps_userspace_consumer.dev, "vcc gps");
+REGULATOR_CONSUMER(ldo3, "reg-userspace-consumer.0", "vcc gps");
REGULATOR_CONSUMER(ldo5, NULL, "vcc cam");
-REGULATOR_CONSUMER(ldo10, &pxa_device_mci.dev, "vcc sdio");
+REGULATOR_CONSUMER(ldo10, "pxa2xx-mci", "vcc sdio");
REGULATOR_CONSUMER(ldo12, NULL, "vcc usb");
-REGULATOR_CONSUMER(ldo19, &em_x270_gprs_userspace_consumer.dev, "vcc gprs");
+REGULATOR_CONSUMER(ldo19, "reg-userspace-consumer.1", "vcc gprs");
REGULATOR_CONSUMER(buck2, NULL, "vcc_core");
#define REGULATOR_INIT(_ldo, _min_uV, _max_uV, _ops_mask) \
@@ -1301,6 +1301,7 @@ static void __init em_x270_init(void)
MACHINE_START(EM_X270, "Compulab EM-X270")
.atag_offset = 0x100,
.map_io = pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
@@ -1311,6 +1312,7 @@ MACHINE_END
MACHINE_START(EXEDA, "Compulab eXeda")
.atag_offset = 0x100,
.map_io = pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
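/*
 * Editor's aside, not part of the commit: the em-x270 hunk above switches
 * regulator consumers from a struct device pointer to .dev_name matching,
 * i.e. the consumer is identified by the platform device's name.id string
 * plus the supply name. A minimal sketch; "my-driver.0" and "vcc_example"
 * are hypothetical.
 */
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply example_consumers[] = {
	{
		.dev_name = "my-driver.0",	/* dev_name(&pdev->dev) of the consumer */
		.supply   = "vcc_example",
	},
};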
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index ac3b1cef4751..e529a35a44ce 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -235,6 +235,7 @@ static void __init gumstix_init(void)
MACHINE_START(GUMSTIX, "Gumstix")
.atag_offset = 0x100, /* match u-boot bi_boot_params */
.map_io = pxa25x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/h5000.c b/arch/arm/mach-pxa/h5000.c
index fde6b4c873c4..e7dec589f014 100644
--- a/arch/arm/mach-pxa/h5000.c
+++ b/arch/arm/mach-pxa/h5000.c
@@ -205,6 +205,7 @@ static void __init h5000_init(void)
MACHINE_START(H5400, "HP iPAQ H5000")
.atag_offset = 0x100,
.map_io = pxa25x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/himalaya.c b/arch/arm/mach-pxa/himalaya.c
index 26d069a9f900..2962de898da9 100644
--- a/arch/arm/mach-pxa/himalaya.c
+++ b/arch/arm/mach-pxa/himalaya.c
@@ -160,6 +160,7 @@ static void __init himalaya_init(void)
MACHINE_START(HIMALAYA, "HTC Himalaya")
.atag_offset = 0x100,
.map_io = pxa25x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.init_machine = himalaya_init,
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 3fa929d4a4f5..b83b95a29503 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -681,11 +681,9 @@ static struct platform_device power_supply = {
static struct regulator_consumer_supply bq24022_consumers[] = {
{
- .dev = &gpio_vbus.dev,
.supply = "vbus_draw",
},
{
- .dev = &power_supply.dev,
.supply = "ac_draw",
},
};
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index 67400192ed3b..1d02eabc9c65 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -193,6 +193,7 @@ static void __init icontrol_init(void)
MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
.atag_offset = 0x100,
.map_io = pxa3xx_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index 8af1840e12cc..6ff466bd43e8 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -195,6 +195,7 @@ static void __init idp_map_io(void)
MACHINE_START(PXA_IDP, "Vibren PXA255 IDP")
/* Maintainer: Vibren Technologies */
.map_io = idp_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h
index 8184669dde28..56d92e5cad85 100644
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/arch/arm/mach-pxa/include/mach/hardware.h
@@ -40,7 +40,6 @@
#define io_p2v(x) IOMEM(0xf2000000 + ((x) & 0x01ffffff) + (((x) & 0x1c000000) >> 1))
#ifndef __ASSEMBLY__
-# define IOMEM(x) ((void __iomem *)(x))
# define __REG(x) (*((volatile u32 __iomem *)io_p2v(x)))
/* With indexed regs we don't want to feed the index through io_p2v()
@@ -52,7 +51,6 @@
#else
-# define IOMEM(x) x
# define __REG(x) io_p2v(x)
# define __PREG(x) io_v2p(x)
@@ -337,8 +335,4 @@ extern unsigned int get_memclk_frequency_10khz(void);
extern unsigned long get_clock_tick_rate(void);
#endif
-#if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI)
-#define ARCH_HAS_DMA_SET_COHERENT_MASK
-#endif
-
#endif /* _ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h
deleted file mode 100644
index fdca3be47d9b..000000000000
--- a/arch/arm/mach-pxa/include/mach/io.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-pxa/include/mach/io.h
- *
- * Copied from asm/arch/sa1100/io.h
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#include <mach/hardware.h>
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
- */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h
index 32975adf3ca4..8765782dd955 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/include/mach/irqs.h
@@ -100,7 +100,7 @@
*/
#define IRQ_BOARD_START (PXA_GPIO_IRQ_BASE + PXA_NR_BUILTIN_GPIO)
-#define NR_IRQS (IRQ_BOARD_START)
+#define PXA_NR_IRQS (IRQ_BOARD_START)
#ifndef __ASSEMBLY__
struct irq_data;
diff --git a/arch/arm/mach-pxa/include/mach/mainstone.h b/arch/arm/mach-pxa/include/mach/mainstone.h
index 4c2d11cd824d..1bfc4e822a41 100644
--- a/arch/arm/mach-pxa/include/mach/mainstone.h
+++ b/arch/arm/mach-pxa/include/mach/mainstone.h
@@ -13,6 +13,8 @@
#ifndef ASM_ARCH_MAINSTONE_H
#define ASM_ARCH_MAINSTONE_H
+#include <mach/irqs.h>
+
#define MST_ETH_PHYS PXA_CS4_PHYS
#define MST_FPGA_PHYS PXA_CS2_PHYS
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 6f4785b347c2..8de0651d7efb 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -580,11 +580,9 @@ static struct platform_device power_supply = {
static struct regulator_consumer_supply bq24022_consumers[] = {
{
- .dev = &gpio_vbus.dev,
.supply = "vbus_draw",
},
{
- .dev = &power_supply.dev,
.supply = "ac_draw",
},
};
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index 29b62afc6f7c..b0a842887780 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <mach/pxa2xx-regs.h>
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index e80a3db735c2..061d57009cee 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -758,6 +758,7 @@ MACHINE_START(MIOA701, "MIO A701")
.atag_offset = 0x100,
.restart_mode = 's',
.map_io = &pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = &pxa27x_init_irq,
.handle_irq = &pxa27x_handle_irq,
.init_machine = mioa701_machine_init,
diff --git a/arch/arm/mach-pxa/mp900.c b/arch/arm/mach-pxa/mp900.c
index 169bf8f97af0..152efbf093f6 100644
--- a/arch/arm/mach-pxa/mp900.c
+++ b/arch/arm/mach-pxa/mp900.c
@@ -95,6 +95,7 @@ MACHINE_START(NEC_MP900, "MobilePro900/C")
.atag_offset = 0x220100,
.timer = &pxa_timer,
.map_io = pxa25x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.init_machine = mp900c_init,
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c
index 1fa80f4f80c8..31e0433d83ba 100644
--- a/arch/arm/mach-pxa/palmld.c
+++ b/arch/arm/mach-pxa/palmld.c
@@ -344,6 +344,7 @@ static void __init palmld_init(void)
MACHINE_START(PALMLD, "Palm LifeDrive")
.atag_offset = 0x100,
.map_io = palmld_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c
index 5ba14316bd9c..0f6bd4fcfa3b 100644
--- a/arch/arm/mach-pxa/palmt5.c
+++ b/arch/arm/mach-pxa/palmt5.c
@@ -205,6 +205,7 @@ MACHINE_START(PALMT5, "Palm Tungsten|T5")
.atag_offset = 0x100,
.map_io = pxa27x_map_io,
.reserve = palmt5_reserve,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 29b51b40f09d..e2d97eed07a7 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -539,6 +539,7 @@ static void __init palmtc_init(void)
MACHINE_START(PALMTC, "Palm Tungsten|C")
.atag_offset = 0x100,
.map_io = pxa25x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c
index 5ebf49acb827..c054827c567f 100644
--- a/arch/arm/mach-pxa/palmte2.c
+++ b/arch/arm/mach-pxa/palmte2.c
@@ -358,6 +358,7 @@ static void __init palmte2_init(void)
MACHINE_START(PALMTE2, "Palm Tungsten|E2")
.atag_offset = 0x100,
.map_io = pxa25x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/palmtreo.c b/arch/arm/mach-pxa/palmtreo.c
index ec8249156c08..fbdebee39a53 100644
--- a/arch/arm/mach-pxa/palmtreo.c
+++ b/arch/arm/mach-pxa/palmtreo.c
@@ -448,6 +448,7 @@ MACHINE_START(TREO680, "Palm Treo 680")
.atag_offset = 0x100,
.map_io = pxa27x_map_io,
.reserve = treo_reserve,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
@@ -461,6 +462,7 @@ MACHINE_START(CENTRO, "Palm Centro 685")
.atag_offset = 0x100,
.map_io = pxa27x_map_io,
.reserve = treo_reserve,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
index 6170d76dfba8..9507605ed547 100644
--- a/arch/arm/mach-pxa/palmtx.c
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -366,6 +366,7 @@ static void __init palmtx_init(void)
MACHINE_START(PALMTX, "Palm T|X")
.atag_offset = 0x100,
.map_io = palmtx_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index b2dff9d415eb..a97b59965bb9 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -401,6 +401,7 @@ static void __init palmz72_init(void)
MACHINE_START(PALMZ72, "Palm Zire72")
.atag_offset = 0x100,
.map_io = pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/pxa2xx.c b/arch/arm/mach-pxa/pxa2xx.c
index 868270421b8c..f8ec85450c42 100644
--- a/arch/arm/mach-pxa/pxa2xx.c
+++ b/arch/arm/mach-pxa/pxa2xx.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/pxa2xx-regs.h>
diff --git a/arch/arm/mach-pxa/pxa300.c b/arch/arm/mach-pxa/pxa300.c
index 40bb16501d86..17cbc0c7bdb8 100644
--- a/arch/arm/mach-pxa/pxa300.c
+++ b/arch/arm/mach-pxa/pxa300.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
#include <mach/pxa300.h>
diff --git a/arch/arm/mach-pxa/pxa320.c b/arch/arm/mach-pxa/pxa320.c
index 8d614ecd8e99..6dc99d4f2dc6 100644
--- a/arch/arm/mach-pxa/pxa320.c
+++ b/arch/arm/mach-pxa/pxa320.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
#include <mach/pxa320.h>
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 1570d457fea3..dffb7e813d98 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -31,6 +31,7 @@
#include <mach/pm.h>
#include <mach/dma.h>
#include <mach/smemc.h>
+#include <mach/irqs.h>
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 22818c7694a8..5905ed130e94 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -43,6 +43,8 @@
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
+#include <asm/system_info.h>
+
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -1090,6 +1092,7 @@ MACHINE_START(RAUMFELD_RC, "Raumfeld Controller")
.atag_offset = 0x100,
.init_machine = raumfeld_controller_init,
.map_io = pxa3xx_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
@@ -1102,6 +1105,7 @@ MACHINE_START(RAUMFELD_CONNECTOR, "Raumfeld Connector")
.atag_offset = 0x100,
.init_machine = raumfeld_connector_init,
.map_io = pxa3xx_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
@@ -1114,6 +1118,7 @@ MACHINE_START(RAUMFELD_SPEAKER, "Raumfeld Speaker")
.atag_offset = 0x100,
.init_machine = raumfeld_speaker_init,
.map_io = pxa3xx_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index 0fe354efb931..86c95a5d8533 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -598,6 +598,7 @@ MACHINE_START(SAAR, "PXA930 Handheld Platform (aka SAAR)")
/* Maintainer: Eric Miao <eric.miao@marvell.com> */
.atag_offset = 0x100,
.map_io = pxa3xx_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index 30989baf7f2a..bdf4cb88ca0a 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -24,6 +24,7 @@
#include <linux/leds.h>
#include <linux/suspend.h>
#include <linux/gpio.h>
+#include <linux/io.h>
#include <asm/mach-types.h>
#include <mach/pm.h>
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index abf355d0c92f..df2ab0fb2ace 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -984,6 +984,7 @@ MACHINE_START(SPITZ, "SHARP Spitz")
.restart_mode = 'g',
.fixup = spitz_fixup,
.map_io = pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.init_machine = spitz_init,
@@ -997,6 +998,7 @@ MACHINE_START(BORZOI, "SHARP Borzoi")
.restart_mode = 'g',
.fixup = spitz_fixup,
.map_io = pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.init_machine = spitz_init,
@@ -1010,6 +1012,7 @@ MACHINE_START(AKITA, "SHARP Akita")
.restart_mode = 'g',
.fixup = spitz_fixup,
.map_io = pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.init_machine = spitz_init,
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index b0656e158d90..4cd645e29b64 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -152,7 +152,7 @@ static struct platform_device sht15 = {
static struct regulator_consumer_supply stargate2_sensor_3_con[] = {
{
- .dev = &sht15.dev,
+ .dev_name = "sht15",
.supply = "vcc",
},
};
@@ -1006,6 +1006,7 @@ static void __init stargate2_init(void)
#ifdef CONFIG_MACH_INTELMOTE2
MACHINE_START(INTELMOTE2, "IMOTE 2")
.map_io = pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/tavorevb.c b/arch/arm/mach-pxa/tavorevb.c
index 9fb38e80e076..736bfdc50ee6 100644
--- a/arch/arm/mach-pxa/tavorevb.c
+++ b/arch/arm/mach-pxa/tavorevb.c
@@ -491,6 +491,7 @@ MACHINE_START(TAVOREVB, "PXA930 Evaluation Board (aka TavorEVB)")
/* Maintainer: Eric Miao <eric.miao@marvell.com> */
.atag_offset = 0x100,
.map_io = pxa3xx_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index b503049d6d26..3d6c9bd90de6 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -22,6 +22,7 @@
#include <asm/mach/time.h>
#include <asm/sched_clock.h>
#include <mach/regs-ost.h>
+#include <mach/irqs.h>
/*
* This is PXA's sched_clock implementation. This has a resolution
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 0f30af617d8f..2b6ac00b2cd9 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -558,6 +558,7 @@ MACHINE_START(TRIZEPS4, "Keith und Koep Trizeps IV module")
.atag_offset = 0x100,
.init_machine = trizeps4_init,
.map_io = trizeps4_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
@@ -569,6 +570,7 @@ MACHINE_START(TRIZEPS4WL, "Keith und Koep Trizeps IV-WL module")
.atag_offset = 0x100,
.init_machine = trizeps4_init,
.map_io = trizeps4_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 7a3d342a7732..130379fb9d0f 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -995,6 +995,7 @@ MACHINE_START(VIPER, "Arcom/Eurotech VIPER SBC")
/* Maintainer: Marc Zyngier <maz@misterjones.org> */
.atag_offset = 0x100,
.map_io = viper_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = viper_init_irq,
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index 1f5cfa96f6d6..c57ab636ea9c 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -718,6 +718,7 @@ static void __init vpac270_init(void)
MACHINE_START(VPAC270, "Voipac PXA270")
.atag_offset = 0x100,
.map_io = pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
index 4bbe9a36fe74..4275713ccd10 100644
--- a/arch/arm/mach-pxa/xcep.c
+++ b/arch/arm/mach-pxa/xcep.c
@@ -182,6 +182,7 @@ MACHINE_START(XCEP, "Iskratel XCEP")
.atag_offset = 0x100,
.init_machine = xcep_init,
.map_io = pxa25x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index b6476848b561..fa8619970841 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -721,6 +721,7 @@ static void __init z2_init(void)
MACHINE_START(ZIPIT2, "Zipit Z2")
.atag_offset = 0x100,
.map_io = pxa27x_map_io,
+ .nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-realview/include/mach/hardware.h b/arch/arm/mach-realview/include/mach/hardware.h
index 8a638d15797f..281e71c97525 100644
--- a/arch/arm/mach-realview/include/mach/hardware.h
+++ b/arch/arm/mach-realview/include/mach/hardware.h
@@ -37,6 +37,6 @@
#else
#define IO_ADDRESS(x) (x)
#endif
-#define __io_address(n) __io(IO_ADDRESS(n))
+#define __io_address(n) IOMEM(IO_ADDRESS(n))
#endif
diff --git a/arch/arm/mach-realview/include/mach/io.h b/arch/arm/mach-realview/include/mach/io.h
deleted file mode 100644
index f05bcdf605d8..000000000000
--- a/arch/arm/mach-realview/include/mach/io.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * arch/arm/mach-realview/include/mach/io.h
- *
- * Copyright (C) 2003 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-rpc/include/mach/hardware.h b/arch/arm/mach-rpc/include/mach/hardware.h
index 050d63c74cc1..257166b21f3d 100644
--- a/arch/arm/mach-rpc/include/mach/hardware.h
+++ b/arch/arm/mach-rpc/include/mach/hardware.h
@@ -14,12 +14,6 @@
#include <mach/memory.h>
-#ifndef __ASSEMBLY__
-#define IOMEM(x) ((void __iomem *)(unsigned long)(x))
-#else
-#define IOMEM(x) x
-#endif /* __ASSEMBLY__ */
-
/*
* What hardware must be present
*/
diff --git a/arch/arm/mach-rpc/include/mach/io.h b/arch/arm/mach-rpc/include/mach/io.h
index 695f4ed2e11b..707071a7ea4e 100644
--- a/arch/arm/mach-rpc/include/mach/io.h
+++ b/arch/arm/mach-rpc/include/mach/io.h
@@ -28,9 +28,4 @@
*/
#define __io(a) (PCIO_BASE + ((a) << 2))
-/*
- * 1:1 mapping for ioremapped regions.
- */
-#define __mem_pci(x) (x)
-
#endif
diff --git a/arch/arm/mach-s3c24xx/include/mach/io.h b/arch/arm/mach-s3c24xx/include/mach/io.h
index 118749f37c4c..5dd1db4e2677 100644
--- a/arch/arm/mach-s3c24xx/include/mach/io.h
+++ b/arch/arm/mach-s3c24xx/include/mach/io.h
@@ -208,9 +208,4 @@ DECLARE_IO(int,l,"")
#define outsw(p,d,l) __raw_writesw(__ioaddr(p),d,l)
#define outsl(p,d,l) __raw_writesl(__ioaddr(p),d,l)
-/*
- * 1:1 mapping for ioremapped regions.
- */
-#define __mem_pci(x) (x)
-
#endif
diff --git a/arch/arm/mach-s3c24xx/simtec-nor.c b/arch/arm/mach-s3c24xx/simtec-nor.c
index 2119ca6a73bc..b9d6d4f92c03 100644
--- a/arch/arm/mach-s3c24xx/simtec-nor.c
+++ b/arch/arm/mach-s3c24xx/simtec-nor.c
@@ -35,9 +35,7 @@
static void simtec_nor_vpp(struct platform_device *pdev, int vpp)
{
unsigned int val;
- unsigned long flags;
- local_irq_save(flags);
val = __raw_readb(BAST_VA_CTRL3);
printk(KERN_DEBUG "%s(%d)\n", __func__, vpp);
@@ -48,7 +46,6 @@ static void simtec_nor_vpp(struct platform_device *pdev, int vpp)
val &= ~BAST_CPLD_CTRL3_ROMWEN;
__raw_writeb(val, BAST_VA_CTRL3);
- local_irq_restore(flags);
}
static struct physmap_flash_data simtec_nor_pdata = {
diff --git a/arch/arm/mach-s3c64xx/include/mach/io.h b/arch/arm/mach-s3c64xx/include/mach/io.h
deleted file mode 100644
index de5716dbbd65..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/io.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* arch/arm/mach-s3c64xxinclude/mach/io.h
- *
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben-linux@fluff.org>
- *
- * Default IO routines for S3C64XX based
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-/* No current ISA/PCI bus support. */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#define IO_SPACE_LIMIT (0xFFFFFFFF)
-
-#endif
diff --git a/arch/arm/mach-s5p64x0/include/mach/io.h b/arch/arm/mach-s5p64x0/include/mach/io.h
deleted file mode 100644
index a3e095c02fb5..000000000000
--- a/arch/arm/mach-s5p64x0/include/mach/io.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/include/mach/io.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben-linux@fluff.org>
- *
- * Default IO routines for S5P64X0 based
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-/* No current ISA/PCI bus support. */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#define IO_SPACE_LIMIT (0xFFFFFFFF)
-
-#endif
diff --git a/arch/arm/mach-s5pc100/include/mach/io.h b/arch/arm/mach-s5pc100/include/mach/io.h
deleted file mode 100644
index 819acf5eaf89..000000000000
--- a/arch/arm/mach-s5pc100/include/mach/io.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* arch/arm/mach-s5pc100/include/mach/io.h
- *
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben-linux@fluff.org>
- *
- * Default IO routines for S5PC100 systems
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-/* No current ISA/PCI bus support. */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#define IO_SPACE_LIMIT (0xFFFFFFFF)
-
-#endif
diff --git a/arch/arm/mach-s5pv210/include/mach/io.h b/arch/arm/mach-s5pv210/include/mach/io.h
deleted file mode 100644
index 5ab9d560bc86..000000000000
--- a/arch/arm/mach-s5pv210/include/mach/io.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* linux/arch/arm/mach-s5pv210/include/mach/io.h
- *
- * Copyright 2008-2010 Ben Dooks <ben-linux@fluff.org>
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on arch/arm/mach-s5p6442/include/mach/io.h
- *
- * Default IO routines for S5PV210
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H __FILE__
-
-/* No current ISA/PCI bus support. */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#define IO_SPACE_LIMIT (0xFFFFFFFF)
-
-#endif /* __ASM_ARM_ARCH_IO_H */
diff --git a/arch/arm/mach-sa1100/include/mach/io.h b/arch/arm/mach-sa1100/include/mach/io.h
deleted file mode 100644
index dfc27ff08344..000000000000
--- a/arch/arm/mach-sa1100/include/mach/io.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/io.h
- *
- * Copyright (C) 1997-1999 Russell King
- *
- * Modifications:
- * 06-12-1997 RMK Created.
- * 07-04-1999 RMK Major cleanup
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-/*
- * __io() is required to be an equivalent mapping to __mem_pci() for
- * SOC_COMMON to work.
- */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
index 6a2a7f2c2557..2704bcd869cd 100644
--- a/arch/arm/mach-shark/core.c
+++ b/arch/arm/mach-shark/core.c
@@ -15,6 +15,7 @@
#include <asm/mach-types.h>
#include <asm/leds.h>
#include <asm/param.h>
+#include <asm/system_misc.h>
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-shark/include/mach/io.h b/arch/arm/mach-shark/include/mach/io.h
index 9ccbcecc430b..1a45fc01ff1d 100644
--- a/arch/arm/mach-shark/include/mach/io.h
+++ b/arch/arm/mach-shark/include/mach/io.h
@@ -15,6 +15,4 @@
#define __io(a) ((void __iomem *)(0xe0000000 + (a)))
-#define __mem_pci(addr) (addr)
-
#endif
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 060e5644c49c..34560cab45d9 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -100,6 +100,10 @@ config MACH_MARZEN
comment "SH-Mobile System Configuration"
+config CPU_HAS_INTEVT
+ bool
+ default y
+
menu "Memory configuration"
config MEMORY_START
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index f50d7c8b1221..cb224a344af0 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -43,6 +43,7 @@
#include <video/sh_mipi_dsi.h>
#include <sound/sh_fsi.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include <mach/sh73a0.h>
#include <mach/common.h>
#include <asm/mach-types.h>
@@ -584,7 +585,7 @@ static void __init ag5evm_init(void)
#ifdef CONFIG_CACHE_L2X0
/* Shared attribute override enable, 64K*8way */
- l2x0_init(__io(0xf0100000), 0x00460000, 0xc2000fff);
+ l2x0_init(IOMEM(0xf0100000), 0x00460000, 0xc2000fff);
#endif
sh73a0_add_standard_devices();
platform_add_devices(ag5evm_devices, ARRAY_SIZE(ag5evm_devices));
diff --git a/arch/arm/mach-shmobile/board-bonito.c b/arch/arm/mach-shmobile/board-bonito.c
index 8b2124da245d..81fd95f7f52a 100644
--- a/arch/arm/mach-shmobile/board-bonito.c
+++ b/arch/arm/mach-shmobile/board-bonito.c
@@ -35,6 +35,7 @@
#include <asm/mach/time.h>
#include <asm/hardware/cache-l2x0.h>
#include <mach/r8a7740.h>
+#include <mach/irqs.h>
#include <video/sh_mobile_lcdc.h>
/*
@@ -370,7 +371,7 @@ static void __init bonito_init(void)
#ifdef CONFIG_CACHE_L2X0
/* Early BRESP enable, Shared attribute override enable, 32K*8way */
- l2x0_init(__io(0xf0002000), 0x40440000, 0x82000fff);
+ l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff);
#endif
r8a7740_add_standard_devices();
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index b627e89037f5..39b6cf85ced6 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -33,6 +33,7 @@
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
#include <linux/dma-mapping.h>
+#include <mach/irqs.h>
#include <mach/sh7367.h>
#include <mach/common.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index 46d757d2759d..0e5a39c670bc 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -34,6 +34,7 @@
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/gpio.h>
#include <linux/dma-mapping.h>
+#include <mach/irqs.h>
#include <mach/sh7377.h>
#include <mach/common.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index 61c067294660..200dcd42a3a0 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -39,6 +39,7 @@
#include <linux/mfd/tmio.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include <mach/sh73a0.h>
#include <mach/common.h>
#include <asm/mach-types.h>
@@ -507,7 +508,7 @@ static void __init kota2_init(void)
#ifdef CONFIG_CACHE_L2X0
/* Early BRESP enable, Shared attribute override enable, 64K*8way */
- l2x0_init(__io(0xf0100000), 0x40460000, 0x82000fff);
+ l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
#endif
sh73a0_add_standard_devices();
platform_add_devices(kota2_devices, ARRAY_SIZE(kota2_devices));
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index ca609502d6cd..f49e28abe0ab 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -39,6 +39,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
+#include <linux/mtd/sh_flctl.h>
#include <linux/pm_clock.h>
#include <linux/smsc911x.h>
#include <linux/sh_intc.h>
@@ -54,6 +55,7 @@
#include <sound/sh_fsi.h>
#include <mach/common.h>
+#include <mach/irqs.h>
#include <mach/sh7372.h>
#include <asm/mach/arch.h>
@@ -955,6 +957,50 @@ static struct platform_device fsi_ak4643_device = {
},
};
+/* FLCTL */
+static struct mtd_partition nand_partition_info[] = {
+ {
+ .name = "system",
+ .offset = 0,
+ .size = 128 * 1024 * 1024,
+ },
+ {
+ .name = "userdata",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 256 * 1024 * 1024,
+ },
+ {
+ .name = "cache",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 128 * 1024 * 1024,
+ },
+};
+
+static struct resource nand_flash_resources[] = {
+ [0] = {
+ .start = 0xe6a30000,
+ .end = 0xe6a3009b,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct sh_flctl_platform_data nand_flash_data = {
+ .parts = nand_partition_info,
+ .nr_parts = ARRAY_SIZE(nand_partition_info),
+ .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET
+ | SHBUSSEL | SEL_16BIT | SNAND_E,
+ .use_holden = 1,
+};
+
+static struct platform_device nand_flash_device = {
+ .name = "sh_flctl",
+ .resource = nand_flash_resources,
+ .num_resources = ARRAY_SIZE(nand_flash_resources),
+ .dev = {
+ .platform_data = &nand_flash_data,
+ },
+};
+
/*
* The card detect pin of the top SD/MMC slot (CN7) is active low and is
* connected to GPIO A22 of SH7372 (GPIO_PORT41).
@@ -1258,6 +1304,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
&fsi_device,
&fsi_ak4643_device,
&fsi_hdmi_device,
+ &nand_flash_device,
&sdhi0_device,
#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
&sdhi1_device,
@@ -1487,6 +1534,30 @@ static void __init mackerel_init(void)
gpio_request(GPIO_FN_MMCCMD0, NULL);
gpio_request(GPIO_FN_MMCCLK0, NULL);
+ /* FLCTL */
+ gpio_request(GPIO_FN_D0_NAF0, NULL);
+ gpio_request(GPIO_FN_D1_NAF1, NULL);
+ gpio_request(GPIO_FN_D2_NAF2, NULL);
+ gpio_request(GPIO_FN_D3_NAF3, NULL);
+ gpio_request(GPIO_FN_D4_NAF4, NULL);
+ gpio_request(GPIO_FN_D5_NAF5, NULL);
+ gpio_request(GPIO_FN_D6_NAF6, NULL);
+ gpio_request(GPIO_FN_D7_NAF7, NULL);
+ gpio_request(GPIO_FN_D8_NAF8, NULL);
+ gpio_request(GPIO_FN_D9_NAF9, NULL);
+ gpio_request(GPIO_FN_D10_NAF10, NULL);
+ gpio_request(GPIO_FN_D11_NAF11, NULL);
+ gpio_request(GPIO_FN_D12_NAF12, NULL);
+ gpio_request(GPIO_FN_D13_NAF13, NULL);
+ gpio_request(GPIO_FN_D14_NAF14, NULL);
+ gpio_request(GPIO_FN_D15_NAF15, NULL);
+ gpio_request(GPIO_FN_FCE0, NULL);
+ gpio_request(GPIO_FN_WE0_FWE, NULL);
+ gpio_request(GPIO_FN_FRB, NULL);
+ gpio_request(GPIO_FN_A4_FOE, NULL);
+ gpio_request(GPIO_FN_A5_FCDE, NULL);
+ gpio_request(GPIO_FN_RD_FSC, NULL);
+
/* enable GPS module (GT-720F) */
gpio_request(GPIO_FN_SCIFA2_TXD1, NULL);
gpio_request(GPIO_FN_SCIFA2_RXD1, NULL);
@@ -1531,6 +1602,7 @@ static void __init mackerel_init(void)
sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs0_device);
sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs1_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &nand_flash_device);
sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device);
sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device);
#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index cbd5e4cd06d2..ef0e13bf0b3a 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -31,6 +31,7 @@
#include <mach/hardware.h>
#include <mach/r8a7779.h>
#include <mach/common.h>
+#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index de243e3c8392..94d1f88246d3 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -511,7 +511,7 @@ enum { MSTP001, MSTP000,
MSTP223,
MSTP218, MSTP217, MSTP216, MSTP214, MSTP208, MSTP207,
MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
- MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
+ MSTP328, MSTP323, MSTP322, MSTP315, MSTP314, MSTP313, MSTP312,
MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP407, MSTP406,
MSTP405, MSTP404, MSTP403, MSTP400,
MSTP_NR };
@@ -553,6 +553,7 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */
[MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
[MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
+ [MSTP315] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 15, 0), /* FLCTL*/
[MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
[MSTP313] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 13, 0), /* SDHI1 */
[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMC */
@@ -653,6 +654,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */
CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP322]), /* USB0 */
+ CLKDEV_DEV_ID("sh_flctl.0", &mstp_clks[MSTP315]), /* FLCTL */
CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
index 21b09b6455e4..7e6559105d40 100644
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -13,6 +13,7 @@
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/err.h>
+#include <asm/cpuidle.h>
#include <asm/io.h>
static void shmobile_enter_wfi(void)
@@ -28,37 +29,19 @@ static int shmobile_cpuidle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- ktime_t before, after;
-
- before = ktime_get();
-
- local_irq_disable();
- local_fiq_disable();
-
shmobile_cpuidle_modes[index]();
- local_irq_enable();
- local_fiq_enable();
-
- after = ktime_get();
- dev->last_residency = ktime_to_ns(ktime_sub(after, before)) >> 10;
-
return index;
}
static struct cpuidle_device shmobile_cpuidle_dev;
static struct cpuidle_driver shmobile_cpuidle_driver = {
- .name = "shmobile_cpuidle",
- .owner = THIS_MODULE,
- .states[0] = {
- .name = "C1",
- .desc = "WFI",
- .exit_latency = 1,
- .target_residency = 1 * 2,
- .flags = CPUIDLE_FLAG_TIME_VALID,
- },
- .safe_state_index = 0, /* C1 */
- .state_count = 1,
+ .name = "shmobile_cpuidle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .safe_state_index = 0, /* C1 */
+ .state_count = 1,
};
void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
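/*
 * Editor's aside, not part of the commit: with .en_core_tk_irqen set, as in
 * the shmobile driver above, the cpuidle core disables/enables interrupts and
 * measures the residency itself, so a state's ->enter() only has to run the
 * idle mode and return the index. ARM_CPUIDLE_WFI_STATE already provides such
 * an ->enter() for plain WFI. The driver name below is hypothetical.
 */
#include <linux/module.h>
#include <linux/cpuidle.h>
#include <asm/cpuidle.h>

static struct cpuidle_driver example_idle_driver = {
	.name			= "example_idle",
	.owner			= THIS_MODULE,
	.en_core_tk_irqen	= 1,	/* core keeps time, handles irqs */
	.states[0]		= ARM_CPUIDLE_WFI_STATE,
	.safe_state_index	= 0,
	.state_count		= 1,
};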
diff --git a/arch/arm/mach-shmobile/include/mach/io.h b/arch/arm/mach-shmobile/include/mach/io.h
deleted file mode 100644
index 7339fe46cb7c..000000000000
--- a/arch/arm/mach-shmobile/include/mach/io.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_MACH_IO_H
-#define __ASM_MACH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) ((void __iomem *)(a))
-#define __mem_pci(a) (a)
-
-#endif /* __ASM_MACH_IO_H */
diff --git a/arch/arm/mach-shmobile/include/mach/irqs.h b/arch/arm/mach-shmobile/include/mach/irqs.h
index dcb714f4d75a..4e686cc201fc 100644
--- a/arch/arm/mach-shmobile/include/mach/irqs.h
+++ b/arch/arm/mach-shmobile/include/mach/irqs.h
@@ -1,15 +1,11 @@
#ifndef __ASM_MACH_IRQS_H
#define __ASM_MACH_IRQS_H
-#define NR_IRQS 1024
+#include <linux/sh_intc.h>
/* GIC */
#define gic_spi(nr) ((nr) + 32)
-/* INTCA */
-#define evt2irq(evt) (((evt) >> 5) - 16)
-#define irq2evt(irq) (((irq) + 16) << 5)
-
/* INTCS */
#define INTCS_VECT_BASE 0x2200
#define INTCS_VECT(n, vect) INTC_VECT((n), INTCS_VECT_BASE + (vect))
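/*
 * Editor's aside, not part of the commit: after the change above, evt2irq()
 * and irq2evt() come from <linux/sh_intc.h> rather than a mach-local copy,
 * so board code keeps translating INTC event codes the same way. The event
 * code 0x0e00 and the handler below are hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/sh_intc.h>

static int example_request_irq(irq_handler_t handler, void *data)
{
	/* evt2irq(0x0e00) == (0x0e00 >> 5) - 16 == Linux irq 96 */
	return request_irq(evt2irq(0x0e00), handler, 0, "example", data);
}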
diff --git a/arch/arm/mach-shmobile/intc-r8a7740.c b/arch/arm/mach-shmobile/intc-r8a7740.c
index 272c84c20c83..09c42afcb22d 100644
--- a/arch/arm/mach-shmobile/intc-r8a7740.c
+++ b/arch/arm/mach-shmobile/intc-r8a7740.c
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <linux/sh_intc.h>
#include <mach/intc.h>
+#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-shmobile/intc-r8a7779.c b/arch/arm/mach-shmobile/intc-r8a7779.c
index 5d92fcde2bc3..550b23df4fd4 100644
--- a/arch/arm/mach-shmobile/intc-r8a7779.c
+++ b/arch/arm/mach-shmobile/intc-r8a7779.c
@@ -42,8 +42,8 @@ static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
void __init r8a7779_init_irq(void)
{
- void __iomem *gic_dist_base = __io(0xf0001000);
- void __iomem *gic_cpu_base = __io(0xf0000100);
+ void __iomem *gic_dist_base = IOMEM(0xf0001000);
+ void __iomem *gic_cpu_base = IOMEM(0xf0000100);
/* use GIC to handle interrupts */
gic_init(0, 29, gic_dist_base, gic_cpu_base);
diff --git a/arch/arm/mach-shmobile/intc-sh7367.c b/arch/arm/mach-shmobile/intc-sh7367.c
index cfde9bfc3669..5bf776495b75 100644
--- a/arch/arm/mach-shmobile/intc-sh7367.c
+++ b/arch/arm/mach-shmobile/intc-sh7367.c
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <linux/sh_intc.h>
#include <mach/intc.h>
+#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 89afcaba99a1..6447e0af52d4 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <linux/sh_intc.h>
#include <mach/intc.h>
+#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-shmobile/intc-sh7377.c b/arch/arm/mach-shmobile/intc-sh7377.c
index 2af4e6e9bc5b..b84a460a3405 100644
--- a/arch/arm/mach-shmobile/intc-sh7377.c
+++ b/arch/arm/mach-shmobile/intc-sh7377.c
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <linux/sh_intc.h>
#include <mach/intc.h>
+#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index 9857595eaa79..ee447404c857 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/sh_intc.h>
#include <mach/intc.h>
+#include <mach/irqs.h>
#include <mach/sh73a0.h>
#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
@@ -420,8 +421,8 @@ static irqreturn_t sh73a0_pint1_demux(int irq, void *dev_id)
void __init sh73a0_init_irq(void)
{
- void __iomem *gic_dist_base = __io(0xf0001000);
- void __iomem *gic_cpu_base = __io(0xf0000100);
+ void __iomem *gic_dist_base = IOMEM(0xf0001000);
+ void __iomem *gic_cpu_base = IOMEM(0xf0000100);
void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
int k, n;
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index 74e52341dd1b..14edb5cffa7f 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -26,6 +26,7 @@
#include <linux/sh_timer.h>
#include <mach/r8a7740.h>
#include <mach/common.h>
+#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 6820d785493d..12c6f529ab89 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -29,6 +29,7 @@
#include <linux/sh_intc.h>
#include <linux/sh_timer.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include <mach/r8a7779.h>
#include <mach/common.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c
index a51e1a1e6996..2e3074ab75b3 100644
--- a/arch/arm/mach-shmobile/setup-sh7367.c
+++ b/arch/arm/mach-shmobile/setup-sh7367.c
@@ -30,6 +30,7 @@
#include <linux/sh_timer.h>
#include <mach/hardware.h>
#include <mach/common.h>
+#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 4e818b7de781..2fe8f83ca124 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -33,6 +33,7 @@
#include <linux/pm_domain.h>
#include <linux/dma-mapping.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include <mach/sh7372.h>
#include <mach/common.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-shmobile/setup-sh7377.c b/arch/arm/mach-shmobile/setup-sh7377.c
index 9f146095098b..d576a6abbade 100644
--- a/arch/arm/mach-shmobile/setup-sh7377.c
+++ b/arch/arm/mach-shmobile/setup-sh7377.c
@@ -32,6 +32,7 @@
#include <mach/hardware.h>
#include <mach/common.h>
#include <asm/mach/map.h>
+#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index b6a0734a738e..5bebffc10455 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -31,6 +31,7 @@
#include <linux/sh_intc.h>
#include <linux/sh_timer.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include <mach/sh73a0.h>
#include <mach/common.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index 9bb7b8575a1f..b62e19d4c9af 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -30,7 +30,7 @@
#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
-#define AVECR 0xfe700040
+#define AVECR IOMEM(0xfe700040)
static struct r8a7779_pm_ch r8a7779_ch_cpu1 = {
.chan_offs = 0x40, /* PWRSR0 .. PWRER0 */
@@ -138,7 +138,7 @@ void __init r8a7779_smp_prepare_cpus(void)
scu_enable(scu_base_addr());
/* Map the reset vector (in headsmp.S) */
- __raw_writel(__pa(shmobile_secondary_vector), __io(AVECR));
+ __raw_writel(__pa(shmobile_secondary_vector), AVECR);
/* enable cache coherency on CPU0 */
modify_scu_cpu_psr(0, 3 << (cpu * 8));
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index c0a9093ba3a8..14ad8b052f1a 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -28,11 +28,11 @@
#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
-#define WUPCR 0xe6151010
-#define SRESCR 0xe6151018
-#define PSTR 0xe6151040
-#define SBAR 0xe6180020
-#define APARMBAREA 0xe6f10020
+#define WUPCR IOMEM(0xe6151010)
+#define SRESCR IOMEM(0xe6151018)
+#define PSTR IOMEM(0xe6151040)
+#define SBAR IOMEM(0xe6180020)
+#define APARMBAREA IOMEM(0xe6f10020)
static void __iomem *scu_base_addr(void)
{
@@ -78,10 +78,10 @@ int __cpuinit sh73a0_boot_secondary(unsigned int cpu)
/* enable cache coherency */
modify_scu_cpu_psr(0, 3 << (cpu * 8));
- if (((__raw_readl(__io(PSTR)) >> (4 * cpu)) & 3) == 3)
- __raw_writel(1 << cpu, __io(WUPCR)); /* wake up */
+ if (((__raw_readl(PSTR) >> (4 * cpu)) & 3) == 3)
+ __raw_writel(1 << cpu, WUPCR); /* wake up */
else
- __raw_writel(1 << cpu, __io(SRESCR)); /* reset */
+ __raw_writel(1 << cpu, SRESCR); /* reset */
return 0;
}
@@ -93,8 +93,8 @@ void __init sh73a0_smp_prepare_cpus(void)
scu_enable(scu_base_addr());
/* Map the reset vector (in headsmp.S) */
- __raw_writel(0, __io(APARMBAREA)); /* 4k */
- __raw_writel(__pa(shmobile_secondary_vector), __io(SBAR));
+ __raw_writel(0, APARMBAREA); /* 4k */
+ __raw_writel(__pa(shmobile_secondary_vector), SBAR);
/* enable cache coherency on CPU0 */
modify_scu_cpu_psr(0, 3 << (cpu * 8));
diff --git a/arch/arm/mach-spear3xx/clock.c b/arch/arm/mach-spear3xx/clock.c
index f67860cd649f..6c4841f55223 100644
--- a/arch/arm/mach-spear3xx/clock.c
+++ b/arch/arm/mach-spear3xx/clock.c
@@ -12,6 +12,7 @@
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/mach-types.h>
#include <plat/clock.h>
diff --git a/arch/arm/mach-spear3xx/include/mach/io.h b/arch/arm/mach-spear3xx/include/mach/io.h
deleted file mode 100644
index 30cff8a1f6b5..000000000000
--- a/arch/arm/mach-spear3xx/include/mach/io.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-spear3xx/include/mach/io.h
- *
- * IO definitions for SPEAr3xx machine family
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __MACH_IO_H
-#define __MACH_IO_H
-
-#include <plat/io.h>
-
-#endif /* __MACH_IO_H */
diff --git a/arch/arm/mach-spear6xx/clock.c b/arch/arm/mach-spear6xx/clock.c
index 358f2800f17b..a86499a8a15f 100644
--- a/arch/arm/mach-spear6xx/clock.c
+++ b/arch/arm/mach-spear6xx/clock.c
@@ -12,6 +12,7 @@
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <plat/clock.h>
#include <mach/misc_regs.h>
diff --git a/arch/arm/mach-spear6xx/include/mach/io.h b/arch/arm/mach-spear6xx/include/mach/io.h
deleted file mode 100644
index fb7c106cea94..000000000000
--- a/arch/arm/mach-spear6xx/include/mach/io.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-spear6xx/include/mach/io.h
- *
- * IO definitions for SPEAr6xx machine family
- *
- * Copyright (C) 2009 ST Microelectronics
- * Rajeev Kumar Kumar<rajeev-dlh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __MACH_IO_H
-#define __MACH_IO_H
-
-#include <plat/io.h>
-
-#endif /* __MACH_IO_H */
-
diff --git a/arch/arm/mach-tegra/board-dt-tegra20.c b/arch/arm/mach-tegra/board-dt-tegra20.c
index e20b419d5983..0952494f481a 100644
--- a/arch/arm/mach-tegra/board-dt-tegra20.c
+++ b/arch/arm/mach-tegra/board-dt-tegra20.c
@@ -68,11 +68,11 @@ struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("nvidia,tegra20-i2s", TEGRA_I2S2_BASE, "tegra-i2s.1", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-das", TEGRA_APB_MISC_DAS_BASE, "tegra-das", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB_BASE, "tegra-ehci.0",
- &tegra_ehci1_device.dev.platform_data),
+ &tegra_ehci1_pdata),
OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB2_BASE, "tegra-ehci.1",
- &tegra_ehci2_device.dev.platform_data),
+ &tegra_ehci2_pdata),
OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB3_BASE, "tegra-ehci.2",
- &tegra_ehci3_device.dev.platform_data),
+ &tegra_ehci3_pdata),
{}
};
diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c
index 7a2a02dbd632..5f6b867e20b4 100644
--- a/arch/arm/mach-tegra/devices.c
+++ b/arch/arm/mach-tegra/devices.c
@@ -23,7 +23,6 @@
#include <linux/fsl_devices.h>
#include <linux/serial_8250.h>
#include <linux/i2c-tegra.h>
-#include <linux/platform_data/tegra_usb.h>
#include <asm/pmu.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
@@ -446,18 +445,18 @@ static struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
.clk = "cdev2",
};
-static struct tegra_ehci_platform_data tegra_ehci1_pdata = {
+struct tegra_ehci_platform_data tegra_ehci1_pdata = {
.operating_mode = TEGRA_USB_OTG,
.power_down_on_bus_suspend = 1,
};
-static struct tegra_ehci_platform_data tegra_ehci2_pdata = {
+struct tegra_ehci_platform_data tegra_ehci2_pdata = {
.phy_config = &tegra_ehci2_ulpi_phy_config,
.operating_mode = TEGRA_USB_HOST,
.power_down_on_bus_suspend = 1,
};
-static struct tegra_ehci_platform_data tegra_ehci3_pdata = {
+struct tegra_ehci_platform_data tegra_ehci3_pdata = {
.operating_mode = TEGRA_USB_HOST,
.power_down_on_bus_suspend = 1,
};
diff --git a/arch/arm/mach-tegra/devices.h b/arch/arm/mach-tegra/devices.h
index 873ecb2f8ae6..ec455679b219 100644
--- a/arch/arm/mach-tegra/devices.h
+++ b/arch/arm/mach-tegra/devices.h
@@ -20,6 +20,11 @@
#define __MACH_TEGRA_DEVICES_H
#include <linux/platform_device.h>
+#include <linux/platform_data/tegra_usb.h>
+
+extern struct tegra_ehci_platform_data tegra_ehci1_pdata;
+extern struct tegra_ehci_platform_data tegra_ehci2_pdata;
+extern struct tegra_ehci_platform_data tegra_ehci3_pdata;
extern struct platform_device tegra_gpio_device;
extern struct platform_device tegra_pinmux_device;
diff --git a/arch/arm/mach-tegra/include/mach/debug-macro.S b/arch/arm/mach-tegra/include/mach/debug-macro.S
index 90069abd37bd..8ce0661b8a3d 100644
--- a/arch/arm/mach-tegra/include/mach/debug-macro.S
+++ b/arch/arm/mach-tegra/include/mach/debug-macro.S
@@ -26,7 +26,6 @@
#include <linux/serial_reg.h>
-#include <mach/io.h>
#include <mach/iomap.h>
#include <mach/irammap.h>
diff --git a/arch/arm/mach-tegra/include/mach/io.h b/arch/arm/mach-tegra/include/mach/io.h
index f15defffb5d2..fe700f9ce7dc 100644
--- a/arch/arm/mach-tegra/include/mach/io.h
+++ b/arch/arm/mach-tegra/include/mach/io.h
@@ -23,56 +23,8 @@
#define IO_SPACE_LIMIT 0xffff
-/* On TEGRA, many peripherals are very closely packed in
- * two 256MB io windows (that actually only use about 64KB
- * at the start of each).
- *
- * We will just map the first 1MB of each window (to minimize
- * pt entries needed) and provide a macro to transform physical
- * io addresses to an appropriate void __iomem *.
- *
- */
-
-#ifdef __ASSEMBLY__
-#define IOMEM(x) (x)
-#else
-#define IOMEM(x) ((void __force __iomem *)(x))
-#endif
-
-#define IO_IRAM_PHYS 0x40000000
-#define IO_IRAM_VIRT IOMEM(0xFE400000)
-#define IO_IRAM_SIZE SZ_256K
-
-#define IO_CPU_PHYS 0x50040000
-#define IO_CPU_VIRT IOMEM(0xFE000000)
-#define IO_CPU_SIZE SZ_16K
-
-#define IO_PPSB_PHYS 0x60000000
-#define IO_PPSB_VIRT IOMEM(0xFE200000)
-#define IO_PPSB_SIZE SZ_1M
-
-#define IO_APB_PHYS 0x70000000
-#define IO_APB_VIRT IOMEM(0xFE300000)
-#define IO_APB_SIZE SZ_1M
-
-#define IO_TO_VIRT_BETWEEN(p, st, sz) ((p) >= (st) && (p) < ((st) + (sz)))
-#define IO_TO_VIRT_XLATE(p, pst, vst) (((p) - (pst) + (vst)))
-
-#define IO_TO_VIRT(n) ( \
- IO_TO_VIRT_BETWEEN((n), IO_PPSB_PHYS, IO_PPSB_SIZE) ? \
- IO_TO_VIRT_XLATE((n), IO_PPSB_PHYS, IO_PPSB_VIRT) : \
- IO_TO_VIRT_BETWEEN((n), IO_APB_PHYS, IO_APB_SIZE) ? \
- IO_TO_VIRT_XLATE((n), IO_APB_PHYS, IO_APB_VIRT) : \
- IO_TO_VIRT_BETWEEN((n), IO_CPU_PHYS, IO_CPU_SIZE) ? \
- IO_TO_VIRT_XLATE((n), IO_CPU_PHYS, IO_CPU_VIRT) : \
- IO_TO_VIRT_BETWEEN((n), IO_IRAM_PHYS, IO_IRAM_SIZE) ? \
- IO_TO_VIRT_XLATE((n), IO_IRAM_PHYS, IO_IRAM_VIRT) : \
- NULL)
-
#ifndef __ASSEMBLER__
-#define IO_ADDRESS(n) (IO_TO_VIRT(n))
-
#ifdef CONFIG_TEGRA_PCI
extern void __iomem *tegra_pcie_io_base;
@@ -88,7 +40,6 @@ static inline void __iomem *__io(unsigned long addr)
#endif
#define __io(a) __io(a)
-#define __mem_pci(a) (a)
#endif
diff --git a/arch/arm/mach-tegra/include/mach/iomap.h b/arch/arm/mach-tegra/include/mach/iomap.h
index cff672a344f4..7e76da73121c 100644
--- a/arch/arm/mach-tegra/include/mach/iomap.h
+++ b/arch/arm/mach-tegra/include/mach/iomap.h
@@ -277,4 +277,46 @@
# define TEGRA_DEBUG_UART_BASE TEGRA_UARTE_BASE
#endif
+/* On TEGRA, many peripherals are very closely packed in
+ * two 256MB io windows (that actually only use about 64KB
+ * at the start of each).
+ *
+ * We will just map the first 1MB of each window (to minimize
+ * pt entries needed) and provide a macro to transform physical
+ * io addresses to an appropriate void __iomem *.
+ *
+ */
+
+#define IO_IRAM_PHYS 0x40000000
+#define IO_IRAM_VIRT IOMEM(0xFE400000)
+#define IO_IRAM_SIZE SZ_256K
+
+#define IO_CPU_PHYS 0x50040000
+#define IO_CPU_VIRT IOMEM(0xFE000000)
+#define IO_CPU_SIZE SZ_16K
+
+#define IO_PPSB_PHYS 0x60000000
+#define IO_PPSB_VIRT IOMEM(0xFE200000)
+#define IO_PPSB_SIZE SZ_1M
+
+#define IO_APB_PHYS 0x70000000
+#define IO_APB_VIRT IOMEM(0xFE300000)
+#define IO_APB_SIZE SZ_1M
+
+#define IO_TO_VIRT_BETWEEN(p, st, sz) ((p) >= (st) && (p) < ((st) + (sz)))
+#define IO_TO_VIRT_XLATE(p, pst, vst) (((p) - (pst) + (vst)))
+
+#define IO_TO_VIRT(n) ( \
+ IO_TO_VIRT_BETWEEN((n), IO_PPSB_PHYS, IO_PPSB_SIZE) ? \
+ IO_TO_VIRT_XLATE((n), IO_PPSB_PHYS, IO_PPSB_VIRT) : \
+ IO_TO_VIRT_BETWEEN((n), IO_APB_PHYS, IO_APB_SIZE) ? \
+ IO_TO_VIRT_XLATE((n), IO_APB_PHYS, IO_APB_VIRT) : \
+ IO_TO_VIRT_BETWEEN((n), IO_CPU_PHYS, IO_CPU_SIZE) ? \
+ IO_TO_VIRT_XLATE((n), IO_CPU_PHYS, IO_CPU_VIRT) : \
+ IO_TO_VIRT_BETWEEN((n), IO_IRAM_PHYS, IO_IRAM_SIZE) ? \
+ IO_TO_VIRT_XLATE((n), IO_IRAM_PHYS, IO_IRAM_VIRT) : \
+ NULL)
+
+#define IO_ADDRESS(n) (IO_TO_VIRT(n))
+
#endif
diff --git a/arch/arm/mach-tegra/io.c b/arch/arm/mach-tegra/io.c
index d23ee2db2827..58b4baf9c483 100644
--- a/arch/arm/mach-tegra/io.c
+++ b/arch/arm/mach-tegra/io.c
@@ -26,6 +26,7 @@
#include <asm/page.h>
#include <asm/mach/map.h>
+#include <mach/iomap.h>
#include "board.h"
diff --git a/arch/arm/mach-tegra/sleep.S b/arch/arm/mach-tegra/sleep.S
index 8f9fde161c34..5b20197bae7f 100644
--- a/arch/arm/mach-tegra/sleep.S
+++ b/arch/arm/mach-tegra/sleep.S
@@ -23,7 +23,9 @@
*/
#include <linux/linkage.h>
-#include <mach/io.h>
+
+#include <asm/assembler.h>
+
#include <mach/iomap.h>
#include "flowctrl.h"
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index 8b90c44d237f..1621ad07d284 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -1544,6 +1544,8 @@ static struct fsmc_nand_platform_data nand_platform_data = {
.nr_partitions = ARRAY_SIZE(u300_partitions),
.options = NAND_SKIP_BBTSCAN,
.width = FSMC_NAND_BW8,
+ .ale_off = PLAT_NAND_ALE,
+ .cle_off = PLAT_NAND_CLE,
};
static struct platform_device nand_device = {
diff --git a/arch/arm/mach-u300/include/mach/io.h b/arch/arm/mach-u300/include/mach/io.h
deleted file mode 100644
index 5d6b4c13b3a0..000000000000
--- a/arch/arm/mach-u300/include/mach/io.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *
- * arch/arm/mach-u300/include/mach/io.h
- *
- *
- * Copyright (C) 2006-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * Dummy IO map for being able to use writew()/readw(),
- * writel()/readw() and similar accessor functions.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-#ifndef __MACH_IO_H
-#define __MACH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
index 035fdc9dbdb0..65f87c523892 100644
--- a/arch/arm/mach-u300/include/mach/u300-regs.h
+++ b/arch/arm/mach-u300/include/mach/u300-regs.h
@@ -18,18 +18,17 @@
* the defines are used for setting up the I/O memory mapping.
*/
-#ifdef __ASSEMBLER__
-#define IOMEM(a) (a)
-#else
-#define IOMEM(a) (void __iomem *) a
-#endif
-
/* NAND Flash CS0 */
#define U300_NAND_CS0_PHYS_BASE 0x80000000
/* NFIF */
#define U300_NAND_IF_PHYS_BASE 0x9f800000
+/* ALE, CLE offset for FSMC NAND */
+#define PLAT_NAND_CLE (1 << 16)
+#define PLAT_NAND_ALE (1 << 17)
+
+
/* AHB Peripherals */
#define U300_AHB_PER_PHYS_BASE 0xa0000000
#define U300_AHB_PER_VIRT_BASE 0xff010000
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index d93d6dbef25b..f84698936d36 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -23,7 +23,7 @@
(((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + U8500_IO_VIRTUAL)
/* typesafe io address */
-#define __io_address(n) __io(IO_ADDRESS(n))
+#define __io_address(n) IOMEM(IO_ADDRESS(n))
/* Used by some plat-nomadik code */
#define io_p2v(n) __io_address(n)
diff --git a/arch/arm/mach-ux500/include/mach/io.h b/arch/arm/mach-ux500/include/mach/io.h
deleted file mode 100644
index 1cf3f44ce5b2..000000000000
--- a/arch/arm/mach-ux500/include/mach/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * arch/arm/mach-u8500/include/mach/io.h
- *
- * Copyright (C) 1997-1999 Russell King
- *
- * Modifications:
- * 06-12-1997 RMK Created.
- * 07-04-1999 RMK Major cleanup
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
- */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-versatile/include/mach/io.h b/arch/arm/mach-versatile/include/mach/io.h
deleted file mode 100644
index f067c14c7182..000000000000
--- a/arch/arm/mach-versatile/include/mach/io.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * arch/arm/mach-versatile/include/mach/io.h
- *
- * Copyright (C) 2003 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-vexpress/include/mach/io.h b/arch/arm/mach-vexpress/include/mach/io.h
deleted file mode 100644
index 13522d86685e..000000000000
--- a/arch/arm/mach-vexpress/include/mach/io.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * arch/arm/mach-vexpress/include/mach/io.h
- *
- * Copyright (C) 2003 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-vt8500/include/mach/io.h b/arch/arm/mach-vt8500/include/mach/io.h
deleted file mode 100644
index 46181eecf273..000000000000
--- a/arch/arm/mach-vt8500/include/mach/io.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * arch/arm/mach-vt8500/include/mach/io.h
- *
- * Copyright (C) 2010 Alexey Charkov
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define __io(a) __typesafe_io((a) + 0xf0000000)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-w90x900/dev.c b/arch/arm/mach-w90x900/dev.c
index db82568a998a..48f5b9fdfb7f 100644
--- a/arch/arm/mach-w90x900/dev.c
+++ b/arch/arm/mach-w90x900/dev.c
@@ -27,6 +27,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
+#include <asm/system_misc.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/mach-w90x900/include/mach/io.h b/arch/arm/mach-w90x900/include/mach/io.h
deleted file mode 100644
index d96ab99df05b..000000000000
--- a/arch/arm/mach-w90x900/include/mach/io.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * arch/arm/mach-w90x900/include/mach/io.h
- *
- * Copyright (c) 2008 Nuvoton technology corporation
- * All rights reserved.
- *
- * Wan ZongShun <mcuos.com@gmail.com>
- *
- * Based on arch/arm/mach-s3c2410/include/mach/io.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * 1:1 mapping for ioremapped regions.
- */
-
-#define __mem_pci(a) (a)
-#define __io(a) __typesafe_io(a)
-
-#endif
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b1e192ba8c24..a53fd2aaa2f4 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -30,13 +30,13 @@
static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
-static uint32_t l2x0_way_mask; /* Bitmask of active ways */
-static uint32_t l2x0_size;
+static u32 l2x0_way_mask; /* Bitmask of active ways */
+static u32 l2x0_size;
struct l2x0_regs l2x0_saved_regs;
struct l2x0_of_data {
- void (*setup)(const struct device_node *, __u32 *, __u32 *);
+ void (*setup)(const struct device_node *, u32 *, u32 *);
void (*save)(void);
void (*resume)(void);
};
@@ -288,7 +288,7 @@ static void l2x0_disable(void)
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
-static void l2x0_unlock(__u32 cache_id)
+static void l2x0_unlock(u32 cache_id)
{
int lockregs;
int i;
@@ -307,11 +307,11 @@ static void l2x0_unlock(__u32 cache_id)
}
}
-void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
- __u32 aux;
- __u32 cache_id;
- __u32 way_size = 0;
+ u32 aux;
+ u32 cache_id;
+ u32 way_size = 0;
int ways;
const char *type;
@@ -388,7 +388,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
- __u32 *aux_val, __u32 *aux_mask)
+ u32 *aux_val, u32 *aux_mask)
{
u32 data[2] = { 0, 0 };
u32 tag = 0;
@@ -422,7 +422,7 @@ static void __init l2x0_of_setup(const struct device_node *np,
}
static void __init pl310_of_setup(const struct device_node *np,
- __u32 *aux_val, __u32 *aux_mask)
+ u32 *aux_val, u32 *aux_mask)
{
u32 data[3] = { 0, 0, 0 };
u32 tag[3] = { 0, 0, 0 };
@@ -548,7 +548,7 @@ static const struct of_device_id l2x0_ids[] __initconst = {
{}
};
-int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
+int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
struct device_node *np;
struct l2x0_of_data *data;
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index ec8c3befb9c8..1267e64133b9 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -23,10 +23,6 @@
#include "mm.h"
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_MT_MINICACHE)
@@ -78,10 +74,9 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
raw_spin_lock(&minicache_lock);
- set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
- flush_tlb_kernel_page(0xffff8000);
+ set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
- mc_copy_user_page((void *)0xffff8000, kto);
+ mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
raw_spin_unlock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8b03a5814d00..b9bcc9d79176 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -24,9 +24,6 @@
#error FIX ME
#endif
-#define from_address (0xffff8000)
-#define to_address (0xffffc000)
-
static DEFINE_RAW_SPINLOCK(v6_lock);
/*
@@ -90,14 +87,11 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
*/
raw_spin_lock(&v6_lock);
- set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
- set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
-
- kfrom = from_address + (offset << PAGE_SHIFT);
- kto = to_address + (offset << PAGE_SHIFT);
+ kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
+ kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
- flush_tlb_kernel_page(kfrom);
- flush_tlb_kernel_page(kto);
+ set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
+ set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
copy_page((void *)kto, (void *)kfrom);
@@ -111,8 +105,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
*/
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
- unsigned int offset = CACHE_COLOUR(vaddr);
- unsigned long to = to_address + (offset << PAGE_SHIFT);
+ unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
/* FIXME: not highmem safe */
discard_old_kernel_data(page_address(page));
@@ -123,8 +116,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
*/
raw_spin_lock(&v6_lock);
- set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
- flush_tlb_kernel_page(to);
+ set_top_pte(to, mk_pte(page, PAGE_KERNEL));
clear_page((void *)to);
raw_spin_unlock(&v6_lock);
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 439d106ae638..0fb85025344d 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -23,12 +23,6 @@
#include "mm.h"
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
-#define COPYPAGE_MINICACHE 0xffff8000
-
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_MT_MINICACHE)
@@ -100,8 +94,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
raw_spin_lock(&minicache_lock);
- set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
- flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+ set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1aa664a1999f..db23ae4aaaab 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -214,7 +214,8 @@ static int __init consistent_init(void)
core_initcall(consistent_init);
static void *
-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller)
{
struct arm_vmregion *c;
size_t align;
@@ -241,7 +242,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
* Allocate a virtual address in the consistent mapping region.
*/
c = arm_vmregion_alloc(&consistent_head, align, size,
- gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
+ gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
if (c) {
pte_t *pte;
int idx = CONSISTENT_PTE_INDEX(c->vm_start);
@@ -320,14 +321,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
#else /* !CONFIG_MMU */
-#define __dma_alloc_remap(page, size, gfp, prot) page_address(page)
+#define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page)
#define __dma_free_remap(addr, size) do { } while (0)
#endif /* CONFIG_MMU */
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
- pgprot_t prot)
+ pgprot_t prot, const void *caller)
{
struct page *page;
void *addr;
@@ -349,7 +350,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
return NULL;
if (!arch_is_coherent())
- addr = __dma_alloc_remap(page, size, gfp, prot);
+ addr = __dma_alloc_remap(page, size, gfp, prot, caller);
else
addr = page_address(page);
@@ -374,7 +375,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
return memory;
return __dma_alloc(dev, size, handle, gfp,
- pgprot_dmacoherent(pgprot_kernel));
+ pgprot_dmacoherent(pgprot_kernel),
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(dma_alloc_coherent);
@@ -386,7 +388,8 @@ void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
return __dma_alloc(dev, size, handle, gfp,
- pgprot_writecombine(pgprot_kernel));
+ pgprot_writecombine(pgprot_kernel),
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(dma_alloc_writecombine);
@@ -723,6 +726,9 @@ EXPORT_SYMBOL(dma_set_mask);
static int __init dma_debug_do_init(void)
{
+#ifdef CONFIG_MMU
+ arm_vmregion_create_proc("dma-mappings", &consistent_head);
+#endif
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5bdff5c3e6cb..9055b5a84ec5 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -165,7 +165,8 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
struct siginfo si;
#ifdef CONFIG_DEBUG_USER
- if (user_debug & UDBG_SEGV) {
+ if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
+ ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
tsk->comm, sig, addr, fsr);
show_pte(tsk->mm, addr);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 062d61a1f87d..77458548e031 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -22,15 +22,12 @@
#ifdef CONFIG_CPU_CACHE_VIPT
-#define ALIAS_FLUSH_START 0xffff4000
-
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
- unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+ unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
const int zero = 0;
- set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
- flush_tlb_kernel_page(to);
+ set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
asm( "mcrr p15, 0, %1, %0, c14\n"
" mcr p15, 0, %2, c7, c10, 4"
@@ -41,13 +38,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
- unsigned long colour = CACHE_COLOUR(vaddr);
+ unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
unsigned long offset = vaddr & (PAGE_SIZE - 1);
unsigned long to;
- set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
- to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
- flush_tlb_kernel_page(to);
+ set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
+ to = va + offset;
flush_icache_range(to, to + len);
}
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 5a21505d7550..21b9e1bf9b77 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -69,15 +69,14 @@ void *kmap_atomic(struct page *page)
* With debugging enabled, kunmap_atomic forces that entry to 0.
* Make sure it was indeed properly unmapped.
*/
- BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+ BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
- set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
/*
* When debugging is off, kunmap_atomic leaves the previous mapping
- * in place, so this TLB flush ensures the TLB is updated with the
- * new mapping.
+ * in place, so the contained TLB flush ensures the TLB is updated
+ * with the new mapping.
*/
- local_flush_tlb_kernel_page(vaddr);
+ set_top_pte(vaddr, mk_pte(page, kmap_prot));
return (void *)vaddr;
}
@@ -96,8 +95,7 @@ void __kunmap_atomic(void *kvaddr)
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
- set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
- local_flush_tlb_kernel_page(vaddr);
+ set_top_pte(vaddr, __pte(0));
#else
(void) idx; /* to kill a warning */
#endif
@@ -121,10 +119,9 @@ void *kmap_atomic_pfn(unsigned long pfn)
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+ BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
- set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
- local_flush_tlb_kernel_page(vaddr);
+ set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
return (void *)vaddr;
}
@@ -132,11 +129,9 @@ void *kmap_atomic_pfn(unsigned long pfn)
struct page *kmap_atomic_to_page(const void *ptr)
{
unsigned long vaddr = (unsigned long)ptr;
- pte_t *pte;
if (vaddr < FIXADDR_START)
return virt_to_page(ptr);
- pte = TOP_PTE(vaddr);
- return pte_page(*pte);
+ return pte_page(get_top_pte(vaddr));
}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 245a55a0a5bb..595079fa9d1d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -658,7 +658,9 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
" pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
#endif
+#ifdef CONFIG_MODULES
" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
" .text : 0x%p" " - 0x%p" " (%4d kB)\n"
" .init : 0x%p" " - 0x%p" " (%4d kB)\n"
" .data : 0x%p" " - 0x%p" " (%4d kB)\n"
@@ -677,7 +679,9 @@ void __init mem_init(void)
MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
(PAGE_SIZE)),
#endif
+#ifdef CONFIG_MODULES
MLM(MODULES_VADDR, MODULES_END),
+#endif
MLK_ROUNDUP(_text, _etext),
MLK_ROUNDUP(__init_begin, __init_end),
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 6780b49f2c69..4f55f5062ab7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -308,11 +308,15 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
+void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+ unsigned int, void *) =
+ __arm_ioremap_caller;
+
void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
- return __arm_ioremap_caller(phys_addr, size, mtype,
- __builtin_return_address(0));
+ return arch_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
@@ -371,4 +375,11 @@ void __iounmap(volatile void __iomem *io_addr)
vunmap(addr);
}
-EXPORT_SYMBOL(__iounmap);
+
+void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
+
+void __arm_iounmap(volatile void __iomem *io_addr)
+{
+ arch_iounmap(io_addr);
+}
+EXPORT_SYMBOL(__arm_iounmap);
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 70f6d3ea4834..27f4a619b35d 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -3,7 +3,31 @@
/* the upper-most page table pointer */
extern pmd_t *top_pmd;
-#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently, while 0xffff4000
+ * is reserved for VIPT aliasing flushing by generic code.
+ *
+ * Note that we don't allow VIPT aliasing caches with SMP.
+ */
+#define COPYPAGE_MINICACHE 0xffff8000
+#define COPYPAGE_V6_FROM 0xffff8000
+#define COPYPAGE_V6_TO 0xffffc000
+/* PFN alias flushing, for VIPT caches */
+#define FLUSH_ALIAS_START 0xffff4000
+
+static inline void set_top_pte(unsigned long va, pte_t pte)
+{
+ pte_t *ptep = pte_offset_kernel(top_pmd, va);
+ set_pte_ext(ptep, pte, 0);
+ local_flush_tlb_kernel_page(va);
+}
+
+static inline pte_t get_top_pte(unsigned long va)
+{
+ pte_t *ptep = pte_offset_kernel(top_pmd, va);
+ return *ptep;
+}
static inline pmd_t *pmd_off_k(unsigned long virt)
{
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index cd439c1dd506..b86f8933ff91 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -999,11 +999,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
{
struct map_desc map;
unsigned long addr;
+ void *vectors;
/*
* Allocate the vector page early.
*/
- vectors_page = early_alloc(PAGE_SIZE);
+ vectors = early_alloc(PAGE_SIZE);
+
+ early_trap_init(vectors);
for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
@@ -1043,7 +1046,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
* location (0xffff0000). If we aren't using high-vectors, also
* create a mapping at the low-vectors virtual address.
*/
- map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
+ map.pfn = __phys_to_pfn(virt_to_phys(vectors));
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
map.type = MT_HIGH_VECTORS;
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 4fc6794cca4b..6486d2f253cd 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -86,13 +86,17 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
}
EXPORT_SYMBOL(__arm_ioremap);
+void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *);
+
void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
unsigned int mtype, void *caller)
{
return __arm_ioremap(phys_addr, size, mtype);
}
-void __iounmap(volatile void __iomem *addr)
+void (*arch_iounmap)(volatile void __iomem *);
+
+void __arm_iounmap(volatile void __iomem *addr)
{
}
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(__arm_iounmap);
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 036fdbfdd62f..a631016e1f8f 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -1,5 +1,8 @@
+#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include "vmregion.h"
@@ -36,7 +39,7 @@
struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
- size_t size, gfp_t gfp)
+ size_t size, gfp_t gfp, const void *caller)
{
unsigned long start = head->vm_start, addr = head->vm_end;
unsigned long flags;
@@ -52,6 +55,8 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
if (!new)
goto out;
+ new->caller = caller;
+
spin_lock_irqsave(&head->vm_lock, flags);
addr = rounddown(addr - size, align);
@@ -129,3 +134,72 @@ void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
kfree(c);
}
+
+#ifdef CONFIG_PROC_FS
+static int arm_vmregion_show(struct seq_file *m, void *p)
+{
+ struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
+
+ seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
+ c->vm_end - c->vm_start);
+ if (c->caller)
+ seq_printf(m, " %pS", (void *)c->caller);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
+{
+ struct arm_vmregion_head *h = m->private;
+ spin_lock_irq(&h->vm_lock);
+ return seq_list_start(&h->vm_list, *pos);
+}
+
+static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ struct arm_vmregion_head *h = m->private;
+ return seq_list_next(p, &h->vm_list, pos);
+}
+
+static void arm_vmregion_stop(struct seq_file *m, void *p)
+{
+ struct arm_vmregion_head *h = m->private;
+ spin_unlock_irq(&h->vm_lock);
+}
+
+static const struct seq_operations arm_vmregion_ops = {
+ .start = arm_vmregion_start,
+ .stop = arm_vmregion_stop,
+ .next = arm_vmregion_next,
+ .show = arm_vmregion_show,
+};
+
+static int arm_vmregion_open(struct inode *inode, struct file *file)
+{
+ struct arm_vmregion_head *h = PDE(inode)->data;
+ int ret = seq_open(file, &arm_vmregion_ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = h;
+ }
+ return ret;
+}
+
+static const struct file_operations arm_vmregion_fops = {
+ .open = arm_vmregion_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
+{
+ proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
+ return 0;
+}
+#else
+int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
+{
+ return 0;
+}
+#endif
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 15e9f044db9f..162be662c088 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -19,11 +19,14 @@ struct arm_vmregion {
unsigned long vm_end;
struct page *vm_pages;
int vm_active;
+ const void *caller;
};
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t);
+struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
+int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
+
#endif
diff --git a/arch/arm/net/Makefile b/arch/arm/net/Makefile
new file mode 100644
index 000000000000..c2c10841b6be
--- /dev/null
+++ b/arch/arm/net/Makefile
@@ -0,0 +1,3 @@
+# ARM-specific networking code
+
+obj-$(CONFIG_BPF_JIT) += bpf_jit_32.o
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
new file mode 100644
index 000000000000..62135849f48b
--- /dev/null
+++ b/arch/arm/net/bpf_jit_32.c
@@ -0,0 +1,915 @@
+/*
+ * Just-In-Time compiler for BPF filters on 32bit ARM
+ *
+ * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/filter.h>
+#include <linux/moduleloader.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/hwcap.h>
+
+#include "bpf_jit_32.h"
+
+/*
+ * ABI:
+ *
+ * r0 scratch register
+ * r4 BPF register A
+ * r5 BPF register X
+ * r6 pointer to the skb
+ * r7 skb->data
+ * r8 skb_headlen(skb)
+ */
+
+#define r_scratch ARM_R0
+/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
+#define r_off ARM_R1
+#define r_A ARM_R4
+#define r_X ARM_R5
+#define r_skb ARM_R6
+#define r_skb_data ARM_R7
+#define r_skb_hl ARM_R8
+
+#define SCRATCH_SP_OFFSET 0
+#define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + (k))
+
+#define SEEN_MEM ((1 << BPF_MEMWORDS) - 1)
+#define SEEN_MEM_WORD(k) (1 << (k))
+#define SEEN_X (1 << BPF_MEMWORDS)
+#define SEEN_CALL (1 << (BPF_MEMWORDS + 1))
+#define SEEN_SKB (1 << (BPF_MEMWORDS + 2))
+#define SEEN_DATA (1 << (BPF_MEMWORDS + 3))
+
+#define FLAG_NEED_X_RESET (1 << 0)
+
+struct jit_ctx {
+ const struct sk_filter *skf;
+ unsigned idx;
+ unsigned prologue_bytes;
+ int ret0_fp_idx;
+ u32 seen;
+ u32 flags;
+ u32 *offsets;
+ u32 *target;
+#if __LINUX_ARM_ARCH__ < 7
+ u16 epilogue_bytes;
+ u16 imm_count;
+ u32 *imms;
+#endif
+};
+
+int bpf_jit_enable __read_mostly;
+
+static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+{
+ u8 ret;
+ int err;
+
+ err = skb_copy_bits(skb, offset, &ret, 1);
+
+ return (u64)err << 32 | ret;
+}
+
+static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+{
+ u16 ret;
+ int err;
+
+ err = skb_copy_bits(skb, offset, &ret, 2);
+
+ return (u64)err << 32 | ntohs(ret);
+}
+
+static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+{
+ u32 ret;
+ int err;
+
+ err = skb_copy_bits(skb, offset, &ret, 4);
+
+ return (u64)err << 32 | ntohl(ret);
+}
+
+/*
+ * Wrapper that handles both OABI and EABI and assures Thumb2 interworking
+ * (where the assembly routines like __aeabi_uidiv could cause problems).
+ */
+static u32 jit_udiv(u32 dividend, u32 divisor)
+{
+ return dividend / divisor;
+}
+
+static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
+{
+ if (ctx->target != NULL)
+ ctx->target[ctx->idx] = inst | (cond << 28);
+
+ ctx->idx++;
+}
+
+/*
+ * Emit an instruction that will be executed unconditionally.
+ */
+static inline void emit(u32 inst, struct jit_ctx *ctx)
+{
+ _emit(ARM_COND_AL, inst, ctx);
+}
+
+static u16 saved_regs(struct jit_ctx *ctx)
+{
+ u16 ret = 0;
+
+ if ((ctx->skf->len > 1) ||
+ (ctx->skf->insns[0].code == BPF_S_RET_A))
+ ret |= 1 << r_A;
+
+#ifdef CONFIG_FRAME_POINTER
+ ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
+#else
+ if (ctx->seen & SEEN_CALL)
+ ret |= 1 << ARM_LR;
+#endif
+ if (ctx->seen & (SEEN_DATA | SEEN_SKB))
+ ret |= 1 << r_skb;
+ if (ctx->seen & SEEN_DATA)
+ ret |= (1 << r_skb_data) | (1 << r_skb_hl);
+ if (ctx->seen & SEEN_X)
+ ret |= 1 << r_X;
+
+ return ret;
+}
+
+static inline int mem_words_used(struct jit_ctx *ctx)
+{
+ /* yes, we do waste some stack space IF there are "holes" in the set */
+ return fls(ctx->seen & SEEN_MEM);
+}
+
+static inline bool is_load_to_a(u16 inst)
+{
+ switch (inst) {
+ case BPF_S_LD_W_LEN:
+ case BPF_S_LD_W_ABS:
+ case BPF_S_LD_H_ABS:
+ case BPF_S_LD_B_ABS:
+ case BPF_S_ANC_CPU:
+ case BPF_S_ANC_IFINDEX:
+ case BPF_S_ANC_MARK:
+ case BPF_S_ANC_PROTOCOL:
+ case BPF_S_ANC_RXHASH:
+ case BPF_S_ANC_QUEUE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void build_prologue(struct jit_ctx *ctx)
+{
+ u16 reg_set = saved_regs(ctx);
+ u16 first_inst = ctx->skf->insns[0].code;
+ u16 off;
+
+#ifdef CONFIG_FRAME_POINTER
+ emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
+ emit(ARM_PUSH(reg_set), ctx);
+ emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
+#else
+ if (reg_set)
+ emit(ARM_PUSH(reg_set), ctx);
+#endif
+
+ if (ctx->seen & (SEEN_DATA | SEEN_SKB))
+ emit(ARM_MOV_R(r_skb, ARM_R0), ctx);
+
+ if (ctx->seen & SEEN_DATA) {
+ off = offsetof(struct sk_buff, data);
+ emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
+ /* headlen = len - data_len */
+ off = offsetof(struct sk_buff, len);
+ emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
+ off = offsetof(struct sk_buff, data_len);
+ emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
+ emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
+ }
+
+ if (ctx->flags & FLAG_NEED_X_RESET)
+ emit(ARM_MOV_I(r_X, 0), ctx);
+
+ /* do not leak kernel data to userspace */
+ if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
+ emit(ARM_MOV_I(r_A, 0), ctx);
+
+ /* stack space for the BPF_MEM words */
+ if (ctx->seen & SEEN_MEM)
+ emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
+}
+
+static void build_epilogue(struct jit_ctx *ctx)
+{
+ u16 reg_set = saved_regs(ctx);
+
+ if (ctx->seen & SEEN_MEM)
+ emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
+
+ reg_set &= ~(1 << ARM_LR);
+
+#ifdef CONFIG_FRAME_POINTER
+ /* the first instruction of the prologue was: mov ip, sp */
+ reg_set &= ~(1 << ARM_IP);
+ reg_set |= (1 << ARM_SP);
+ emit(ARM_LDM(ARM_SP, reg_set), ctx);
+#else
+ if (reg_set) {
+ if (ctx->seen & SEEN_CALL)
+ reg_set |= 1 << ARM_PC;
+ emit(ARM_POP(reg_set), ctx);
+ }
+
+ if (!(ctx->seen & SEEN_CALL))
+ emit(ARM_BX(ARM_LR), ctx);
+#endif
+}
+
+static int16_t imm8m(u32 x)
+{
+ u32 rot;
+
+ for (rot = 0; rot < 16; rot++)
+ if ((x & ~ror32(0xff, 2 * rot)) == 0)
+ return rol32(x, 2 * rot) | (rot << 8);
+
+ return -1;
+}
+
+#if __LINUX_ARM_ARCH__ < 7
+
+static u16 imm_offset(u32 k, struct jit_ctx *ctx)
+{
+ unsigned i = 0, offset;
+ u16 imm;
+
+ /* on the "fake" run we just count them (duplicates included) */
+ if (ctx->target == NULL) {
+ ctx->imm_count++;
+ return 0;
+ }
+
+ while ((i < ctx->imm_count) && ctx->imms[i]) {
+ if (ctx->imms[i] == k)
+ break;
+ i++;
+ }
+
+ if (ctx->imms[i] == 0)
+ ctx->imms[i] = k;
+
+ /* constants go just after the epilogue */
+ offset = ctx->offsets[ctx->skf->len];
+ offset += ctx->prologue_bytes;
+ offset += ctx->epilogue_bytes;
+ offset += i * 4;
+
+ ctx->target[offset / 4] = k;
+
+ /* PC in ARM mode == address of the instruction + 8 */
+ imm = offset - (8 + ctx->idx * 4);
+
+ return imm;
+}
+
+#endif /* __LINUX_ARM_ARCH__ */
+
+/*
+ * Move an immediate that's not an imm8m to a core register.
+ */
+static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
+{
+#if __LINUX_ARM_ARCH__ < 7
+ emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
+#else
+ emit(ARM_MOVW(rd, val & 0xffff), ctx);
+ if (val > 0xffff)
+ emit(ARM_MOVT(rd, val >> 16), ctx);
+#endif
+}
+
+static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
+{
+ int imm12 = imm8m(val);
+
+ if (imm12 >= 0)
+ emit(ARM_MOV_I(rd, imm12), ctx);
+ else
+ emit_mov_i_no8m(rd, val, ctx);
+}
+
+#if __LINUX_ARM_ARCH__ < 6
+
+static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
+{
+ _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
+ _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
+ _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
+ _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
+ _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
+ _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
+ _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
+ _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
+}
+
+static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
+{
+ _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
+ _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
+ _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
+}
+
+static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
+{
+ emit(ARM_LSL_R(ARM_R1, r_src, 8), ctx);
+ emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSL, 8), ctx);
+ emit(ARM_LSL_I(r_dst, r_dst, 8), ctx);
+ emit(ARM_LSL_R(r_dst, r_dst, 8), ctx);
+}
+
+#else /* ARMv6+ */
+
+static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
+{
+ _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
+#ifdef __LITTLE_ENDIAN
+ _emit(cond, ARM_REV(r_res, r_res), ctx);
+#endif
+}
+
+static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
+{
+ _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
+#ifdef __LITTLE_ENDIAN
+ _emit(cond, ARM_REV16(r_res, r_res), ctx);
+#endif
+}
+
+static inline void emit_swap16(u8 r_dst __maybe_unused,
+ u8 r_src __maybe_unused,
+ struct jit_ctx *ctx __maybe_unused)
+{
+#ifdef __LITTLE_ENDIAN
+ emit(ARM_REV16(r_dst, r_src), ctx);
+#endif
+}
+
+#endif /* __LINUX_ARM_ARCH__ < 6 */
+
+
+/* Compute the immediate value for a PC-relative branch. */
+static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
+{
+ u32 imm;
+
+ if (ctx->target == NULL)
+ return 0;
+ /*
+ * BPF allows only forward jumps and the offset of the target is
+ * still the one computed during the first pass.
+ */
+ imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);
+
+ return imm >> 2;
+}
+
+#define OP_IMM3(op, r1, r2, imm_val, ctx) \
+ do { \
+ imm12 = imm8m(imm_val); \
+ if (imm12 < 0) { \
+ emit_mov_i_no8m(r_scratch, imm_val, ctx); \
+ emit(op ## _R((r1), (r2), r_scratch), ctx); \
+ } else { \
+ emit(op ## _I((r1), (r2), imm12), ctx); \
+ } \
+ } while (0)
+
+static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
+{
+ if (ctx->ret0_fp_idx >= 0) {
+ _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
+ /* NOP to keep the size constant between passes */
+ emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
+ } else {
+ _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
+ _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
+ }
+}
+
+static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
+{
+#if __LINUX_ARM_ARCH__ < 5
+ emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
+
+ if (elf_hwcap & HWCAP_THUMB)
+ emit(ARM_BX(tgt_reg), ctx);
+ else
+ emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
+#else
+ emit(ARM_BLX_R(tgt_reg), ctx);
+#endif
+}
+
+static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
+{
+#if __LINUX_ARM_ARCH__ == 7
+ if (elf_hwcap & HWCAP_IDIVA) {
+ emit(ARM_UDIV(rd, rm, rn), ctx);
+ return;
+ }
+#endif
+ if (rm != ARM_R0)
+ emit(ARM_MOV_R(ARM_R0, rm), ctx);
+ if (rn != ARM_R1)
+ emit(ARM_MOV_R(ARM_R1, rn), ctx);
+
+ ctx->seen |= SEEN_CALL;
+ emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
+ emit_blx_r(ARM_R3, ctx);
+
+ if (rd != ARM_R0)
+ emit(ARM_MOV_R(rd, ARM_R0), ctx);
+}
+
+static inline void update_on_xread(struct jit_ctx *ctx)
+{
+ if (!(ctx->seen & SEEN_X))
+ ctx->flags |= FLAG_NEED_X_RESET;
+
+ ctx->seen |= SEEN_X;
+}
+
+static int build_body(struct jit_ctx *ctx)
+{
+ void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
+ const struct sk_filter *prog = ctx->skf;
+ const struct sock_filter *inst;
+ unsigned i, load_order, off, condt;
+ int imm12;
+ u32 k;
+
+ for (i = 0; i < prog->len; i++) {
+ inst = &(prog->insns[i]);
+ /* K as an immediate value operand */
+ k = inst->k;
+
+ /* compute offsets only in the fake pass */
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx * 4;
+
+ switch (inst->code) {
+ case BPF_S_LD_IMM:
+ emit_mov_i(r_A, k, ctx);
+ break;
+ case BPF_S_LD_W_LEN:
+ ctx->seen |= SEEN_SKB;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
+ emit(ARM_LDR_I(r_A, r_skb,
+ offsetof(struct sk_buff, len)), ctx);
+ break;
+ case BPF_S_LD_MEM:
+ /* A = scratch[k] */
+ ctx->seen |= SEEN_MEM_WORD(k);
+ emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
+ break;
+ case BPF_S_LD_W_ABS:
+ load_order = 2;
+ goto load;
+ case BPF_S_LD_H_ABS:
+ load_order = 1;
+ goto load;
+ case BPF_S_LD_B_ABS:
+ load_order = 0;
+load:
+ /* the interpreter will deal with the negative K */
+ if ((int)k < 0)
+ return -ENOTSUPP;
+ emit_mov_i(r_off, k, ctx);
+load_common:
+ ctx->seen |= SEEN_DATA | SEEN_CALL;
+
+ if (load_order > 0) {
+ emit(ARM_SUB_I(r_scratch, r_skb_hl,
+ 1 << load_order), ctx);
+ emit(ARM_CMP_R(r_scratch, r_off), ctx);
+ condt = ARM_COND_HS;
+ } else {
+ emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
+ condt = ARM_COND_HI;
+ }
+
+ _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
+ ctx);
+
+ if (load_order == 0)
+ _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
+ ctx);
+ else if (load_order == 1)
+ emit_load_be16(condt, r_A, r_scratch, ctx);
+ else if (load_order == 2)
+ emit_load_be32(condt, r_A, r_scratch, ctx);
+
+ _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);
+
+ /* the slowpath */
+ emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
+ emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
+ /* the offset is already in R1 */
+ emit_blx_r(ARM_R3, ctx);
+ /* check the result of skb_copy_bits */
+ emit(ARM_CMP_I(ARM_R1, 0), ctx);
+ emit_err_ret(ARM_COND_NE, ctx);
+ emit(ARM_MOV_R(r_A, ARM_R0), ctx);
+ break;
+ case BPF_S_LD_W_IND:
+ load_order = 2;
+ goto load_ind;
+ case BPF_S_LD_H_IND:
+ load_order = 1;
+ goto load_ind;
+ case BPF_S_LD_B_IND:
+ load_order = 0;
+load_ind:
+ OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
+ goto load_common;
+ case BPF_S_LDX_IMM:
+ ctx->seen |= SEEN_X;
+ emit_mov_i(r_X, k, ctx);
+ break;
+ case BPF_S_LDX_W_LEN:
+ ctx->seen |= SEEN_X | SEEN_SKB;
+ emit(ARM_LDR_I(r_X, r_skb,
+ offsetof(struct sk_buff, len)), ctx);
+ break;
+ case BPF_S_LDX_MEM:
+ ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
+ emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
+ break;
+ case BPF_S_LDX_B_MSH:
+ /* x = ((*(frame + k)) & 0xf) << 2; */
+ ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
+ /* the interpreter should deal with the negative K */
+ if (k < 0)
+ return -1;
+ /* offset in r1: we might have to take the slow path */
+ emit_mov_i(r_off, k, ctx);
+ emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
+
+ /* load in r0: common with the slowpath */
+ _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
+ ARM_R1), ctx);
+ /*
+ * emit_mov_i() might generate one or two instructions,
+ * the same holds for emit_blx_r()
+ */
+ _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);
+
+ emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
+ /* r_off is r1 */
+ emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
+ emit_blx_r(ARM_R3, ctx);
+ /* check the return value of skb_copy_bits */
+ emit(ARM_CMP_I(ARM_R1, 0), ctx);
+ emit_err_ret(ARM_COND_NE, ctx);
+
+ emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
+ emit(ARM_LSL_I(r_X, r_X, 2), ctx);
+ break;
+ case BPF_S_ST:
+ ctx->seen |= SEEN_MEM_WORD(k);
+ emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
+ break;
+ case BPF_S_STX:
+ update_on_xread(ctx);
+ ctx->seen |= SEEN_MEM_WORD(k);
+ emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
+ break;
+ case BPF_S_ALU_ADD_K:
+ /* A += K */
+ OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
+ break;
+ case BPF_S_ALU_ADD_X:
+ update_on_xread(ctx);
+ emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
+ break;
+ case BPF_S_ALU_SUB_K:
+ /* A -= K */
+ OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
+ break;
+ case BPF_S_ALU_SUB_X:
+ update_on_xread(ctx);
+ emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
+ break;
+ case BPF_S_ALU_MUL_K:
+ /* A *= K */
+ emit_mov_i(r_scratch, k, ctx);
+ emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
+ break;
+ case BPF_S_ALU_MUL_X:
+ update_on_xread(ctx);
+ emit(ARM_MUL(r_A, r_A, r_X), ctx);
+ break;
+ case BPF_S_ALU_DIV_K:
+ /* current k == reciprocal_value(userspace k) */
+ emit_mov_i(r_scratch, k, ctx);
+ /* A = top 32 bits of the product */
+ emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
+ break;
+ case BPF_S_ALU_DIV_X:
+ update_on_xread(ctx);
+ emit(ARM_CMP_I(r_X, 0), ctx);
+ emit_err_ret(ARM_COND_EQ, ctx);
+ emit_udiv(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_S_ALU_OR_K:
+ /* A |= K */
+ OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
+ break;
+ case BPF_S_ALU_OR_X:
+ update_on_xread(ctx);
+ emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
+ break;
+ case BPF_S_ALU_AND_K:
+ /* A &= K */
+ OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
+ break;
+ case BPF_S_ALU_AND_X:
+ update_on_xread(ctx);
+ emit(ARM_AND_R(r_A, r_A, r_X), ctx);
+ break;
+ case BPF_S_ALU_LSH_K:
+ if (unlikely(k > 31))
+ return -1;
+ emit(ARM_LSL_I(r_A, r_A, k), ctx);
+ break;
+ case BPF_S_ALU_LSH_X:
+ update_on_xread(ctx);
+ emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
+ break;
+ case BPF_S_ALU_RSH_K:
+ if (unlikely(k > 31))
+ return -1;
+ emit(ARM_LSR_I(r_A, r_A, k), ctx);
+ break;
+ case BPF_S_ALU_RSH_X:
+ update_on_xread(ctx);
+ emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
+ break;
+ case BPF_S_ALU_NEG:
+ /* A = -A */
+ emit(ARM_RSB_I(r_A, r_A, 0), ctx);
+ break;
+ case BPF_S_JMP_JA:
+ /* pc += K */
+ emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
+ break;
+ case BPF_S_JMP_JEQ_K:
+ /* pc += (A == K) ? pc->jt : pc->jf */
+ condt = ARM_COND_EQ;
+ goto cmp_imm;
+ case BPF_S_JMP_JGT_K:
+ /* pc += (A > K) ? pc->jt : pc->jf */
+ condt = ARM_COND_HI;
+ goto cmp_imm;
+ case BPF_S_JMP_JGE_K:
+ /* pc += (A >= K) ? pc->jt : pc->jf */
+ condt = ARM_COND_HS;
+cmp_imm:
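+ /* imm8m() is negative if K cannot be encoded as a rotated 8-bit immediate */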
+ imm12 = imm8m(k);
+ if (imm12 < 0) {
+ emit_mov_i_no8m(r_scratch, k, ctx);
+ emit(ARM_CMP_R(r_A, r_scratch), ctx);
+ } else {
+ emit(ARM_CMP_I(r_A, imm12), ctx);
+ }
+cond_jump:
+ if (inst->jt)
+ _emit(condt, ARM_B(b_imm(i + inst->jt + 1,
+ ctx)), ctx);
+ if (inst->jf)
+ _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
+ ctx)), ctx);
+ break;
+ case BPF_S_JMP_JEQ_X:
+ /* pc += (A == X) ? pc->jt : pc->jf */
+ condt = ARM_COND_EQ;
+ goto cmp_x;
+ case BPF_S_JMP_JGT_X:
+ /* pc += (A > X) ? pc->jt : pc->jf */
+ condt = ARM_COND_HI;
+ goto cmp_x;
+ case BPF_S_JMP_JGE_X:
+ /* pc += (A >= X) ? pc->jt : pc->jf */
+ condt = ARM_COND_CS;
+cmp_x:
+ update_on_xread(ctx);
+ emit(ARM_CMP_R(r_A, r_X), ctx);
+ goto cond_jump;
+ case BPF_S_JMP_JSET_K:
+ /* pc += (A & K) ? pc->jt : pc->jf */
+ condt = ARM_COND_NE;
+ /* not set iff all zeroes iff Z==1 iff EQ */
+
+ imm12 = imm8m(k);
+ if (imm12 < 0) {
+ emit_mov_i_no8m(r_scratch, k, ctx);
+ emit(ARM_TST_R(r_A, r_scratch), ctx);
+ } else {
+ emit(ARM_TST_I(r_A, imm12), ctx);
+ }
+ goto cond_jump;
+ case BPF_S_JMP_JSET_X:
+ /* pc += (A & X) ? pc->jt : pc->jf */
+ update_on_xread(ctx);
+ condt = ARM_COND_NE;
+ emit(ARM_TST_R(r_A, r_X), ctx);
+ goto cond_jump;
+ case BPF_S_RET_A:
+ emit(ARM_MOV_R(ARM_R0, r_A), ctx);
+ goto b_epilogue;
+ case BPF_S_RET_K:
+ if ((k == 0) && (ctx->ret0_fp_idx < 0))
+ ctx->ret0_fp_idx = i;
+ emit_mov_i(ARM_R0, k, ctx);
+b_epilogue:
+ if (i != ctx->skf->len - 1)
+ emit(ARM_B(b_imm(prog->len, ctx)), ctx);
+ break;
+ case BPF_S_MISC_TAX:
+ /* X = A */
+ ctx->seen |= SEEN_X;
+ emit(ARM_MOV_R(r_X, r_A), ctx);
+ break;
+ case BPF_S_MISC_TXA:
+ /* A = X */
+ update_on_xread(ctx);
+ emit(ARM_MOV_R(r_A, r_X), ctx);
+ break;
+ case BPF_S_ANC_PROTOCOL:
+ /* A = ntohs(skb->protocol) */
+ ctx->seen |= SEEN_SKB;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+ protocol) != 2);
+ off = offsetof(struct sk_buff, protocol);
+ emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
+ emit_swap16(r_A, r_scratch, ctx);
+ break;
+ case BPF_S_ANC_CPU:
+ /* r_scratch = current_thread_info() */
+ OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
+ /* A = current_thread_info()->cpu */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
+ off = offsetof(struct thread_info, cpu);
+ emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+ break;
+ case BPF_S_ANC_IFINDEX:
+ /* A = skb->dev->ifindex */
+ ctx->seen |= SEEN_SKB;
+ off = offsetof(struct sk_buff, dev);
+ emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
+
+ emit(ARM_CMP_I(r_scratch, 0), ctx);
+ emit_err_ret(ARM_COND_EQ, ctx);
+
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+ ifindex) != 4);
+ off = offsetof(struct net_device, ifindex);
+ emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+ break;
+ case BPF_S_ANC_MARK:
+ ctx->seen |= SEEN_SKB;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+ off = offsetof(struct sk_buff, mark);
+ emit(ARM_LDR_I(r_A, r_skb, off), ctx);
+ break;
+ case BPF_S_ANC_RXHASH:
+ ctx->seen |= SEEN_SKB;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
+ off = offsetof(struct sk_buff, rxhash);
+ emit(ARM_LDR_I(r_A, r_skb, off), ctx);
+ break;
+ case BPF_S_ANC_QUEUE:
+ ctx->seen |= SEEN_SKB;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+ queue_mapping) != 2);
+ BUILD_BUG_ON(offsetof(struct sk_buff,
+ queue_mapping) > 0xff);
+ off = offsetof(struct sk_buff, queue_mapping);
+ emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
+ break;
+ default:
+ return -1;
+ }
+ }
+
+ /* compute offsets only during the first pass */
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx * 4;
+
+ return 0;
+}
+
+
+void bpf_jit_compile(struct sk_filter *fp)
+{
+ struct jit_ctx ctx;
+ unsigned tmp_idx;
+ unsigned alloc_size;
+
+ if (!bpf_jit_enable)
+ return;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.skf = fp;
+ ctx.ret0_fp_idx = -1;
+
+ ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
+ if (ctx.offsets == NULL)
+ return;
+
+ /* fake pass to fill in the ctx->seen */
+ if (unlikely(build_body(&ctx)))
+ goto out;
+
+ tmp_idx = ctx.idx;
+ build_prologue(&ctx);
+ ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
+
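+ /*
+ * on pre-v7 ARM, constants that cannot be encoded as immediates
+ * are placed in a pool after the epilogue; reserve room for the
+ * imm_count words collected during the fake pass
+ */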
+#if __LINUX_ARM_ARCH__ < 7
+ tmp_idx = ctx.idx;
+ build_epilogue(&ctx);
+ ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
+
+ ctx.idx += ctx.imm_count;
+ if (ctx.imm_count) {
+ ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
+ if (ctx.imms == NULL)
+ goto out;
+ }
+#else
+ /* there's nothing after the epilogue on ARMv7 */
+ build_epilogue(&ctx);
+#endif
+
+ alloc_size = 4 * ctx.idx;
+ ctx.target = module_alloc(max(sizeof(struct work_struct),
+ alloc_size));
+ if (unlikely(ctx.target == NULL))
+ goto out;
+
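+ /* real pass: emit the final code into the allocated buffer */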
+ ctx.idx = 0;
+ build_prologue(&ctx);
+ build_body(&ctx);
+ build_epilogue(&ctx);
+
+ flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
+
+#if __LINUX_ARM_ARCH__ < 7
+ if (ctx.imm_count)
+ kfree(ctx.imms);
+#endif
+
+ if (bpf_jit_enable > 1)
+ print_hex_dump(KERN_INFO, "BPF JIT code: ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
+ alloc_size, false);
+
+ fp->bpf_func = (void *)ctx.target;
+out:
+ kfree(ctx.offsets);
+ return;
+}
+
+static void bpf_jit_free_worker(struct work_struct *work)
+{
+ module_free(NULL, work);
+}
+
+void bpf_jit_free(struct sk_filter *fp)
+{
+ struct work_struct *work;
+
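+ /*
+ * the JIT image was allocated with room for a work_struct, so it
+ * can be reused to defer module_free() to process context
+ */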
+ if (fp->bpf_func != sk_run_filter) {
+ work = (struct work_struct *)fp->bpf_func;
+
+ INIT_WORK(work, bpf_jit_free_worker);
+ schedule_work(work);
+ }
+}
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
new file mode 100644
index 000000000000..99ae5e3f46d2
--- /dev/null
+++ b/arch/arm/net/bpf_jit_32.h
@@ -0,0 +1,190 @@
+/*
+ * Just-In-Time compiler for BPF filters on 32bit ARM
+ *
+ * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef PFILTER_OPCODES_ARM_H
+#define PFILTER_OPCODES_ARM_H
+
+#define ARM_R0 0
+#define ARM_R1 1
+#define ARM_R2 2
+#define ARM_R3 3
+#define ARM_R4 4
+#define ARM_R5 5
+#define ARM_R6 6
+#define ARM_R7 7
+#define ARM_R8 8
+#define ARM_R9 9
+#define ARM_R10 10
+#define ARM_FP 11
+#define ARM_IP 12
+#define ARM_SP 13
+#define ARM_LR 14
+#define ARM_PC 15
+
+#define ARM_COND_EQ 0x0
+#define ARM_COND_NE 0x1
+#define ARM_COND_CS 0x2
+#define ARM_COND_HS ARM_COND_CS
+#define ARM_COND_CC 0x3
+#define ARM_COND_LO ARM_COND_CC
+#define ARM_COND_MI 0x4
+#define ARM_COND_PL 0x5
+#define ARM_COND_VS 0x6
+#define ARM_COND_VC 0x7
+#define ARM_COND_HI 0x8
+#define ARM_COND_LS 0x9
+#define ARM_COND_GE 0xa
+#define ARM_COND_LT 0xb
+#define ARM_COND_GT 0xc
+#define ARM_COND_LE 0xd
+#define ARM_COND_AL 0xe
+
+/* register shift types */
+#define SRTYPE_LSL 0
+#define SRTYPE_LSR 1
+#define SRTYPE_ASR 2
+#define SRTYPE_ROR 3
+
+#define ARM_INST_ADD_R 0x00800000
+#define ARM_INST_ADD_I 0x02800000
+
+#define ARM_INST_AND_R 0x00000000
+#define ARM_INST_AND_I 0x02000000
+
+#define ARM_INST_BIC_R 0x01c00000
+#define ARM_INST_BIC_I 0x03c00000
+
+#define ARM_INST_B 0x0a000000
+#define ARM_INST_BX 0x012FFF10
+#define ARM_INST_BLX_R 0x012fff30
+
+#define ARM_INST_CMP_R 0x01500000
+#define ARM_INST_CMP_I 0x03500000
+
+#define ARM_INST_LDRB_I 0x05d00000
+#define ARM_INST_LDRB_R 0x07d00000
+#define ARM_INST_LDRH_I 0x01d000b0
+#define ARM_INST_LDR_I 0x05900000
+
+#define ARM_INST_LDM 0x08900000
+
+#define ARM_INST_LSL_I 0x01a00000
+#define ARM_INST_LSL_R 0x01a00010
+
+#define ARM_INST_LSR_I 0x01a00020
+#define ARM_INST_LSR_R 0x01a00030
+
+#define ARM_INST_MOV_R 0x01a00000
+#define ARM_INST_MOV_I 0x03a00000
+#define ARM_INST_MOVW 0x03000000
+#define ARM_INST_MOVT 0x03400000
+
+#define ARM_INST_MUL 0x00000090
+
+#define ARM_INST_POP 0x08bd0000
+#define ARM_INST_PUSH 0x092d0000
+
+#define ARM_INST_ORR_R 0x01800000
+#define ARM_INST_ORR_I 0x03800000
+
+#define ARM_INST_REV 0x06bf0f30
+#define ARM_INST_REV16 0x06bf0fb0
+
+#define ARM_INST_RSB_I 0x02600000
+
+#define ARM_INST_SUB_R 0x00400000
+#define ARM_INST_SUB_I 0x02400000
+
+#define ARM_INST_STR_I 0x05800000
+
+#define ARM_INST_TST_R 0x01100000
+#define ARM_INST_TST_I 0x03100000
+
+#define ARM_INST_UDIV 0x0730f010
+
+#define ARM_INST_UMULL 0x00800090
+
+/* register */
+#define _AL3_R(op, rd, rn, rm) ((op ## _R) | (rd) << 12 | (rn) << 16 | (rm))
+/* immediate */
+#define _AL3_I(op, rd, rn, imm) ((op ## _I) | (rd) << 12 | (rn) << 16 | (imm))
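+/* rd goes in bits 15:12, rn in bits 19:16 (ARM data-processing encoding) */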
+
+#define ARM_ADD_R(rd, rn, rm) _AL3_R(ARM_INST_ADD, rd, rn, rm)
+#define ARM_ADD_I(rd, rn, imm) _AL3_I(ARM_INST_ADD, rd, rn, imm)
+
+#define ARM_AND_R(rd, rn, rm) _AL3_R(ARM_INST_AND, rd, rn, rm)
+#define ARM_AND_I(rd, rn, imm) _AL3_I(ARM_INST_AND, rd, rn, imm)
+
+#define ARM_BIC_R(rd, rn, rm) _AL3_R(ARM_INST_BIC, rd, rn, rm)
+#define ARM_BIC_I(rd, rn, imm) _AL3_I(ARM_INST_BIC, rd, rn, imm)
+
+#define ARM_B(imm24) (ARM_INST_B | ((imm24) & 0xffffff))
+#define ARM_BX(rm) (ARM_INST_BX | (rm))
+#define ARM_BLX_R(rm) (ARM_INST_BLX_R | (rm))
+
+#define ARM_CMP_R(rn, rm) _AL3_R(ARM_INST_CMP, 0, rn, rm)
+#define ARM_CMP_I(rn, imm) _AL3_I(ARM_INST_CMP, 0, rn, imm)
+
+#define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
+ | (off))
+#define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
+ | (off))
+#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \
+ | (rm))
+#define ARM_LDRH_I(rt, rn, off) (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
+ | (((off) & 0xf0) << 4) | ((off) & 0xf))
+
+#define ARM_LDM(rn, regs) (ARM_INST_LDM | (rn) << 16 | (regs))
+
+#define ARM_LSL_R(rd, rn, rm) (_AL3_R(ARM_INST_LSL, rd, 0, rn) | (rm) << 8)
+#define ARM_LSL_I(rd, rn, imm) (_AL3_I(ARM_INST_LSL, rd, 0, rn) | (imm) << 7)
+
+#define ARM_LSR_R(rd, rn, rm) (_AL3_R(ARM_INST_LSR, rd, 0, rn) | (rm) << 8)
+#define ARM_LSR_I(rd, rn, imm) (_AL3_I(ARM_INST_LSR, rd, 0, rn) | (imm) << 7)
+
+#define ARM_MOV_R(rd, rm) _AL3_R(ARM_INST_MOV, rd, 0, rm)
+#define ARM_MOV_I(rd, imm) _AL3_I(ARM_INST_MOV, rd, 0, imm)
+
+#define ARM_MOVW(rd, imm) \
+ (ARM_INST_MOVW | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff))
+
+#define ARM_MOVT(rd, imm) \
+ (ARM_INST_MOVT | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff))
+
+#define ARM_MUL(rd, rm, rn) (ARM_INST_MUL | (rd) << 16 | (rm) << 8 | (rn))
+
+#define ARM_POP(regs) (ARM_INST_POP | (regs))
+#define ARM_PUSH(regs) (ARM_INST_PUSH | (regs))
+
+#define ARM_ORR_R(rd, rn, rm) _AL3_R(ARM_INST_ORR, rd, rn, rm)
+#define ARM_ORR_I(rd, rn, imm) _AL3_I(ARM_INST_ORR, rd, rn, imm)
+#define ARM_ORR_S(rd, rn, rm, type, rs) \
+ (ARM_ORR_R(rd, rn, rm) | (type) << 5 | (rs) << 7)
+
+#define ARM_REV(rd, rm) (ARM_INST_REV | (rd) << 12 | (rm))
+#define ARM_REV16(rd, rm) (ARM_INST_REV16 | (rd) << 12 | (rm))
+
+#define ARM_RSB_I(rd, rn, imm) _AL3_I(ARM_INST_RSB, rd, rn, imm)
+
+#define ARM_SUB_R(rd, rn, rm) _AL3_R(ARM_INST_SUB, rd, rn, rm)
+#define ARM_SUB_I(rd, rn, imm) _AL3_I(ARM_INST_SUB, rd, rn, imm)
+
+#define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \
+ | (off))
+
+#define ARM_TST_R(rn, rm) _AL3_R(ARM_INST_TST, 0, rn, rm)
+#define ARM_TST_I(rn, imm) _AL3_I(ARM_INST_TST, 0, rn, imm)
+
+#define ARM_UDIV(rd, rn, rm) (ARM_INST_UDIV | (rd) << 16 | (rn) | (rm) << 8)
+
+#define ARM_UMULL(rd_lo, rd_hi, rn, rm) (ARM_INST_UMULL | (rd_hi) << 16 \
+ | (rd_lo) << 12 | (rm) << 8 | rn)
+
+#endif /* PFILTER_OPCODES_ARM_H */
diff --git a/arch/arm/plat-mxc/include/mach/hardware.h b/arch/arm/plat-mxc/include/mach/hardware.h
index a599f01f8b92..0630513554de 100644
--- a/arch/arm/plat-mxc/include/mach/hardware.h
+++ b/arch/arm/plat-mxc/include/mach/hardware.h
@@ -22,11 +22,8 @@
#include <asm/sizes.h>
-#ifdef __ASSEMBLER__
-#define IOMEM(addr) (addr)
-#else
-#define IOMEM(addr) ((void __force __iomem *)(addr))
-#endif
+#define addr_in_module(addr, mod) \
+ ((unsigned long)(addr) - mod ## _BASE_ADDR < mod ## _SIZE)
#define IMX_IO_P2V_MODULE(addr, module) \
(((addr) - module ## _BASE_ADDR) < module ## _SIZE ? \
diff --git a/arch/arm/plat-mxc/include/mach/io.h b/arch/arm/plat-mxc/include/mach/io.h
deleted file mode 100644
index 338300b18b00..000000000000
--- a/arch/arm/plat-mxc/include/mach/io.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_MXC_IO_H__
-#define __ASM_ARCH_MXC_IO_H__
-
-/* Allow IO space to be anywhere in the memory */
-#define IO_SPACE_LIMIT 0xffffffff
-
-#define __arch_ioremap __imx_ioremap
-#define __arch_iounmap __iounmap
-
-#define addr_in_module(addr, mod) \
- ((unsigned long)(addr) - mod ## _BASE_ADDR < mod ## _SIZE)
-
-extern void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int);
-
-static inline void __iomem *
-__imx_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
-{
- if (imx_ioremap != NULL)
- return imx_ioremap(phys_addr, size, mtype);
- else
- return __arm_ioremap(phys_addr, size, mtype);
-}
-
-/* io address mapping macro */
-#define __io(a) __typesafe_io(a)
-
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/plat-nomadik/Kconfig b/arch/arm/plat-nomadik/Kconfig
index bca4914b4b9d..4c48c8b60b54 100644
--- a/arch/arm/plat-nomadik/Kconfig
+++ b/arch/arm/plat-nomadik/Kconfig
@@ -23,7 +23,6 @@ config HAS_MTU
config NOMADIK_MTU_SCHED_CLOCK
bool
depends on HAS_MTU
- select HAVE_SCHED_CLOCK
help
Use the Multi Timer Unit as the sched_clock.
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index fd0ee84c45d1..9ff93b065686 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -200,8 +200,7 @@ dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
sg.dma_address = addr;
sg.length = size;
- return chan->device->device_prep_slave_sg(chan, &sg, 1,
- direction, flags);
+ return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags);
}
#else
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index b8a96c6a1a30..2f6e9924a814 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -158,10 +158,6 @@
#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
-#define OMAP_GPIO_IRQ(nr) (OMAP_GPIO_IS_MPUIO(nr) ? \
- IH_MPUIO_BASE + ((nr) & 0x0f) : \
- IH_GPIO_BASE + (nr))
-
struct omap_gpio_dev_attr {
int bank_width; /* GPIO bank width */
bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
diff --git a/arch/arm/plat-omap/include/plat/hardware.h b/arch/arm/plat-omap/include/plat/hardware.h
index 537b05ae1f51..e897978371c2 100644
--- a/arch/arm/plat-omap/include/plat/hardware.h
+++ b/arch/arm/plat-omap/include/plat/hardware.h
@@ -43,12 +43,6 @@
#endif
#include <plat/serial.h>
-#ifdef __ASSEMBLER__
-#define IOMEM(x) (x)
-#else
-#define IOMEM(x) ((void __force __iomem *)(x))
-#endif
-
/*
* ---------------------------------------------------------------------------
* Common definitions for all OMAP processors
diff --git a/arch/arm/plat-omap/include/plat/sdrc.h b/arch/arm/plat-omap/include/plat/sdrc.h
index 925b12b500dc..9bb978ecd884 100644
--- a/arch/arm/plat-omap/include/plat/sdrc.h
+++ b/arch/arm/plat-omap/include/plat/sdrc.h
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <mach/io.h>
/* SDRC register offsets - read/write with sdrc_{read,write}_reg() */
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index d0fc9f4dc155..762eeb0626c1 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -112,7 +112,6 @@ extern int omap4430_phy_suspend(struct device *dev, int suspend);
*/
#define OMAP2_L4_IO_OFFSET 0xb2000000
-#define IOMEM(x) ((void __force __iomem *)(x))
#define OMAP2_L4_IO_ADDRESS(pa) IOMEM((pa) + OMAP2_L4_IO_OFFSET)
static inline u8 omap_readb(u32 pa)
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c
index 0db73ae646bc..290942d9adda 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/plat-s3c24xx/cpu.c
@@ -36,6 +36,7 @@
#include <asm/irq.h>
#include <asm/cacheflush.h>
#include <asm/system_info.h>
+#include <asm/system_misc.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c
index 301d9c319d0b..eb9f4f534006 100644
--- a/arch/arm/plat-samsung/dma-ops.c
+++ b/arch/arm/plat-samsung/dma-ops.c
@@ -79,11 +79,11 @@ static int samsung_dmadev_prepare(unsigned ch,
info->len, offset_in_page(info->buf));
sg_dma_address(&sg) = info->buf;
- desc = chan->device->device_prep_slave_sg(chan,
+ desc = dmaengine_prep_slave_sg(chan,
&sg, 1, info->direction, DMA_PREP_INTERRUPT);
break;
case DMA_CYCLIC:
- desc = chan->device->device_prep_dma_cyclic(chan,
+ desc = dmaengine_prep_dma_cyclic(chan,
info->buf, info->len, info->period, info->direction);
break;
default:
diff --git a/arch/arm/plat-spear/include/plat/hardware.h b/arch/arm/plat-spear/include/plat/hardware.h
index 66d677225d15..70187d763e26 100644
--- a/arch/arm/plat-spear/include/plat/hardware.h
+++ b/arch/arm/plat-spear/include/plat/hardware.h
@@ -14,10 +14,4 @@
#ifndef __PLAT_HARDWARE_H
#define __PLAT_HARDWARE_H
-#ifndef __ASSEMBLY__
-#define IOMEM(x) ((void __iomem __force *)(x))
-#else
-#define IOMEM(x) (x)
-#endif
-
#endif /* __PLAT_HARDWARE_H */
diff --git a/arch/arm/plat-spear/include/plat/io.h b/arch/arm/plat-spear/include/plat/io.h
deleted file mode 100644
index 4d4ba822b3eb..000000000000
--- a/arch/arm/plat-spear/include/plat/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * arch/arm/plat-spear/include/plat/io.h
- *
- * IO definitions for SPEAr platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_IO_H
-#define __PLAT_IO_H
-
-#define IO_SPACE_LIMIT 0xFFFFFFFF
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif /* __PLAT_IO_H */
diff --git a/arch/arm/plat-spear/include/plat/keyboard.h b/arch/arm/plat-spear/include/plat/keyboard.h
index c16cc31ecbed..0562f134621d 100644
--- a/arch/arm/plat-spear/include/plat/keyboard.h
+++ b/arch/arm/plat-spear/include/plat/keyboard.h
@@ -159,11 +159,4 @@ struct kbd_platform_data {
unsigned int mode;
};
-/* This function is used to set platform data field of pdev->dev */
-static inline void
-kbd_set_plat_data(struct platform_device *pdev, struct kbd_platform_data *data)
-{
- pdev->dev.platform_data = data;
-}
-
#endif /* __PLAT_KEYBOARD_H */
diff --git a/arch/arm/plat-versatile/Kconfig b/arch/arm/plat-versatile/Kconfig
index 52353beb369d..043f7b02a9e7 100644
--- a/arch/arm/plat-versatile/Kconfig
+++ b/arch/arm/plat-versatile/Kconfig
@@ -11,7 +11,6 @@ config PLAT_VERSATILE_LEDS
depends on ARCH_REALVIEW || ARCH_VERSATILE
config PLAT_VERSATILE_SCHED_CLOCK
- def_bool y if !ARCH_INTEGRATOR_AP
- select HAVE_SCHED_CLOCK
+ def_bool y
endif
diff --git a/arch/avr32/include/asm/posix_types.h b/arch/avr32/include/asm/posix_types.h
index fe0c0c014389..74667bfc88cc 100644
--- a/arch/avr32/include/asm/posix_types.h
+++ b/arch/avr32/include/asm/posix_types.h
@@ -14,112 +14,27 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
typedef unsigned short __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
typedef unsigned short __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
typedef unsigned long __kernel_size_t;
typedef long __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
+#define __kernel_size_t __kernel_size_t
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
-typedef unsigned short __kernel_old_dev_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__)
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
-}
+#define __kernel_old_uid_t __kernel_old_uid_t
+typedef unsigned short __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
-{
- unsigned long *__tmp = __p->fds_bits;
- int __i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 16:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- __tmp[ 8] = 0; __tmp[ 9] = 0;
- __tmp[10] = 0; __tmp[11] = 0;
- __tmp[12] = 0; __tmp[13] = 0;
- __tmp[14] = 0; __tmp[15] = 0;
- return;
-
- case 8:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- return;
-
- case 4:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- return;
- }
- }
- __i = __FDSET_LONGS;
- while (__i) {
- __i--;
- *__tmp = 0;
- __tmp++;
- }
-}
-
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
#endif /* __ASM_AVR32_POSIX_TYPES_H */
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 889c544688ca..0445c4fd67e3 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1351,7 +1351,6 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
goto fail;
slave->sdata.dma_dev = &dw_dmac0_device.dev;
- slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
slave->sdata.cfg_hi = (DWC_CFGH_SRC_PER(0)
| DWC_CFGH_DST_PER(1));
slave->sdata.cfg_lo &= ~(DWC_CFGL_HS_DST_POL
@@ -2046,27 +2045,19 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
/* Check if DMA slave interface for capture should be configured. */
if (flags & AC97C_CAPTURE) {
rx_dws->dma_dev = &dw_dmac0_device.dev;
- rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3);
rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
rx_dws->src_master = 0;
rx_dws->dst_master = 1;
- rx_dws->src_msize = DW_DMA_MSIZE_1;
- rx_dws->dst_msize = DW_DMA_MSIZE_1;
- rx_dws->fc = DW_DMA_FC_D_P2M;
}
/* Check if DMA slave interface for playback should be configured. */
if (flags & AC97C_PLAYBACK) {
tx_dws->dma_dev = &dw_dmac0_device.dev;
- tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
tx_dws->cfg_hi = DWC_CFGH_DST_PER(4);
tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
tx_dws->src_master = 0;
tx_dws->dst_master = 1;
- tx_dws->src_msize = DW_DMA_MSIZE_1;
- tx_dws->dst_msize = DW_DMA_MSIZE_1;
- tx_dws->fc = DW_DMA_FC_D_M2P;
}
if (platform_device_add_data(pdev, data,
@@ -2136,14 +2127,10 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data)
dws = &data->dws;
dws->dma_dev = &dw_dmac0_device.dev;
- dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
dws->cfg_hi = DWC_CFGH_DST_PER(2);
dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
dws->src_master = 0;
dws->dst_master = 1;
- dws->src_msize = DW_DMA_MSIZE_1;
- dws->dst_msize = DW_DMA_MSIZE_1;
- dws->fc = DW_DMA_FC_D_M2P;
if (platform_device_add_data(pdev, data,
sizeof(struct atmel_abdac_pdata)))
diff --git a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
index a9b38967f703..4bba58561d5c 100644
--- a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
+++ b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
@@ -14,11 +14,4 @@ struct mci_dma_data {
#define slave_data_ptr(s) (&(s)->sdata)
#define find_slave_dev(s) ((s)->sdata.dma_dev)
-#define setup_dma_addr(s, t, r) do { \
- if (s) { \
- (s)->sdata.tx_reg = (t); \
- (s)->sdata.rx_reg = (r); \
- } \
-} while (0)
-
#endif /* __MACH_ATMEL_MCI_H */
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 3c64b2894c13..1c3ccd416d50 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -11,7 +11,7 @@ config TMS320C6X
select HAVE_DMA_API_DEBUG
select HAVE_GENERIC_HARDIRQS
select HAVE_MEMBLOCK
- select HAVE_SPARSE_IRQ
+ select SPARSE_IRQ
select IRQ_DOMAIN
select OF
select OF_EARLY_FLATTREE
diff --git a/arch/cris/include/asm/posix_types.h b/arch/cris/include/asm/posix_types.h
index ce3fb25a460b..72b3cd6eda0b 100644
--- a/arch/cris/include/asm/posix_types.h
+++ b/arch/cris/include/asm/posix_types.h
@@ -12,55 +12,25 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
typedef unsigned short __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
typedef unsigned short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
+#define __kernel_uid_t __kernel_uid_t
+
typedef __SIZE_TYPE__ __kernel_size_t;
typedef long __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
+#define __kernel_size_t __kernel_size_t
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#ifdef __KERNEL__
-
-#undef __FD_SET
-#define __FD_SET(fd,fdsetp) set_bit(fd, (void *)(fdsetp))
-
-#undef __FD_CLR
-#define __FD_CLR(fd,fdsetp) clear_bit(fd, (void *)(fdsetp))
-
-#undef __FD_ISSET
-#define __FD_ISSET(fd,fdsetp) test_bit(fd, (void *)(fdsetp))
-
-#undef __FD_ZERO
-#define __FD_ZERO(fdsetp) memset((void *)(fdsetp), 0, __FDSET_LONGS << 2)
-
-#endif /* __KERNEL__ */
+#define __kernel_old_dev_t __kernel_old_dev_t
#endif /* __ARCH_CRIS_POSIX_TYPES_H */
diff --git a/arch/frv/include/asm/posix_types.h b/arch/frv/include/asm/posix_types.h
index a9f1f5be0632..3f34cb45fbb3 100644
--- a/arch/frv/include/asm/posix_types.h
+++ b/arch/frv/include/asm/posix_types.h
@@ -7,56 +7,23 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
typedef unsigned short __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
typedef unsigned short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef int __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
-
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
-typedef unsigned short __kernel_old_dev_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
+#define __kernel_uid_t __kernel_uid_t
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__)
-
-#undef __FD_SET
-#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
-
-#undef __FD_CLR
-#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
-
-#undef __FD_ISSET
-#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
-
-#undef __FD_ZERO
-#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
+typedef unsigned short __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
#endif
diff --git a/arch/h8300/include/asm/posix_types.h b/arch/h8300/include/asm/posix_types.h
index 6f833a16f694..bc4c34efb1ad 100644
--- a/arch/h8300/include/asm/posix_types.h
+++ b/arch/h8300/include/asm/posix_types.h
@@ -7,54 +7,23 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
typedef unsigned short __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
typedef unsigned short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef int __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
+#define __kernel_uid_t __kernel_uid_t
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__)
-
-#undef __FD_SET
-#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
-
-#undef __FD_CLR
-#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
-
-#undef __FD_ISSET
-#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
-
-#undef __FD_ZERO
-#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
-
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
#endif
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..4c96187e2049
--- /dev/null
+++ b/arch/ia64/include/asm/cmpxchg.h
@@ -0,0 +1 @@
+#include <asm/intrinsics.h>
diff --git a/arch/ia64/include/asm/posix_types.h b/arch/ia64/include/asm/posix_types.h
index 17885567b731..7323ab9467eb 100644
--- a/arch/ia64/include/asm/posix_types.h
+++ b/arch/ia64/include/asm/posix_types.h
@@ -1,126 +1,11 @@
#ifndef _ASM_IA64_POSIX_TYPES_H
#define _ASM_IA64_POSIX_TYPES_H
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc. Also, we cannot
- * assume GCC is being used.
- *
- * Based on <asm-alpha/posix_types.h>.
- *
- * Modified 1998-2000, 2003
- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-
-typedef unsigned long __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
typedef unsigned int __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef long long __kernel_loff_t;
-typedef int __kernel_pid_t;
-typedef int __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-typedef __kernel_uid_t __kernel_old_uid_t;
-typedef __kernel_gid_t __kernel_old_gid_t;
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
-
-typedef unsigned int __kernel_old_dev_t;
-
-# ifdef __KERNEL__
-
-# ifndef __GNUC__
-
-#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
-#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
-#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
-#define __FD_ZERO(set) \
- ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set)))
+#define __kernel_nlink_t __kernel_nlink_t
-# else /* !__GNUC__ */
-
-/* With GNU C, use inline functions instead so args are evaluated only once: */
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
-{
- unsigned long *tmp = p->fds_bits;
- int i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 16:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- return;
-
- case 8:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- return;
+typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
- case 4:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- return;
- }
- }
- i = __FDSET_LONGS;
- while (i) {
- i--;
- *tmp = 0;
- tmp++;
- }
-}
+#include <asm-generic/posix_types.h>
-# endif /* !__GNUC__ */
-# endif /* __KERNEL__ */
#endif /* _ASM_IA64_POSIX_TYPES_H */
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index af5650169043..a48bd9a9927b 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -269,8 +269,8 @@ void foo(void)
BLANK();
/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
- DEFINE(IA64_GTOD_LOCK_OFFSET,
- offsetof (struct fsyscall_gtod_data_t, lock));
+ DEFINE(IA64_GTOD_SEQ_OFFSET,
+ offsetof (struct fsyscall_gtod_data_t, seq));
DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
offsetof (struct fsyscall_gtod_data_t, wall_time));
DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index f15d8601827f..cc26edac0ec6 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -173,7 +173,7 @@ ENTRY(fsys_set_tid_address)
FSYS_RETURN
END(fsys_set_tid_address)
-#if IA64_GTOD_LOCK_OFFSET !=0
+#if IA64_GTOD_SEQ_OFFSET !=0
#error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t
#endif
#if IA64_ITC_JITTER_OFFSET !=0
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
index 57d2ee6c83e1..146b15b5fec3 100644
--- a/arch/ia64/kernel/fsyscall_gtod_data.h
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -6,7 +6,7 @@
*/
struct fsyscall_gtod_data_t {
- seqlock_t lock;
+ seqcount_t seq;
struct timespec wall_time;
struct timespec monotonic_time;
cycle_t clk_mask;
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 9dc52b63fc87..ce74e143aea3 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -38,6 +38,7 @@
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
+#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index aa94bdda9de8..ecc904b33c5f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -34,9 +34,7 @@
static cycle_t itc_get_cycles(struct clocksource *cs);
-struct fsyscall_gtod_data_t fsyscall_gtod_data = {
- .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
-};
+struct fsyscall_gtod_data_t fsyscall_gtod_data;
struct itc_jitter_data_t itc_jitter_data;
@@ -459,9 +457,7 @@ void update_vsyscall_tz(void)
void update_vsyscall(struct timespec *wall, struct timespec *wtm,
struct clocksource *c, u32 mult)
{
- unsigned long flags;
-
- write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+ write_seqcount_begin(&fsyscall_gtod_data.seq);
/* copy fsyscall clock data */
fsyscall_gtod_data.clk_mask = c->mask;
@@ -484,6 +480,6 @@ void update_vsyscall(struct timespec *wall, struct timespec *wtm,
fsyscall_gtod_data.monotonic_time.tv_sec++;
}
- write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+ write_seqcount_end(&fsyscall_gtod_data.seq);
}
diff --git a/arch/m32r/include/asm/posix_types.h b/arch/m32r/include/asm/posix_types.h
index b309c5858637..0195850e1f88 100644
--- a/arch/m32r/include/asm/posix_types.h
+++ b/arch/m32r/include/asm/posix_types.h
@@ -7,112 +7,22 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
typedef unsigned short __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
typedef unsigned short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef int __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
+#define __kernel_uid_t __kernel_uid_t
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__)
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
-}
-
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
-{
- unsigned long *__tmp = __p->fds_bits;
- int __i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 16:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- __tmp[ 8] = 0; __tmp[ 9] = 0;
- __tmp[10] = 0; __tmp[11] = 0;
- __tmp[12] = 0; __tmp[13] = 0;
- __tmp[14] = 0; __tmp[15] = 0;
- return;
-
- case 8:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- return;
-
- case 4:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- return;
- }
- }
- __i = __FDSET_LONGS;
- while (__i) {
- __i--;
- *__tmp = 0;
- __tmp++;
- }
-}
-
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
#endif /* _ASM_M32R_POSIX_TYPES_H */
diff --git a/arch/m68k/include/asm/posix_types.h b/arch/m68k/include/asm/posix_types.h
index 98d0970d9bad..6373093be72b 100644
--- a/arch/m68k/include/asm/posix_types.h
+++ b/arch/m68k/include/asm/posix_types.h
@@ -7,55 +7,22 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
typedef unsigned short __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
typedef unsigned short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef int __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
-
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
-typedef unsigned short __kernel_old_dev_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
+#define __kernel_uid_t __kernel_uid_t
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__)
-
-#undef __FD_SET
-#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
-
-#undef __FD_CLR
-#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
-
-#undef __FD_ISSET
-#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
-
-#undef __FD_ZERO
-#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
+typedef unsigned short __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
#endif
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index edbbae17e820..ce30e2f91d77 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2457,6 +2457,7 @@ config MIPS32_COMPAT
config COMPAT
bool
depends on MIPS32_COMPAT
+ select ARCH_WANT_OLD_COMPAT_IPC
default y
config SYSVIPC_COMPAT
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
index 0a430e06f5e5..e44a55bc7f0d 100644
--- a/arch/mips/cavium-octeon/flash_setup.c
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -60,7 +60,7 @@ static int __init flash_init(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
mtd_device_parse_register(mymtd, part_probe_types,
- 0, NULL, 0);
+ NULL, NULL, 0);
} else {
pr_err("Failed to register MTD device for flash\n");
}
diff --git a/arch/mips/configs/db1300_defconfig b/arch/mips/configs/db1300_defconfig
index c38b190151c4..3590ab5d9791 100644
--- a/arch/mips/configs/db1300_defconfig
+++ b/arch/mips/configs/db1300_defconfig
@@ -133,7 +133,7 @@ CONFIG_BLK_DEV_BSG=y
CONFIG_IOSCHED_NOOP=y
CONFIG_DEFAULT_NOOP=y
CONFIG_DEFAULT_IOSCHED="noop"
-CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_UNINLINE_SPIN_UNLOCK is not set
CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
CONFIG_INLINE_READ_UNLOCK=y
CONFIG_INLINE_READ_UNLOCK_IRQ=y
diff --git a/arch/mips/include/asm/posix_types.h b/arch/mips/include/asm/posix_types.h
index c200102c8586..e0308dcca135 100644
--- a/arch/mips/include/asm/posix_types.h
+++ b/arch/mips/include/asm/posix_types.h
@@ -17,128 +17,21 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
-#if (_MIPS_SZLONG == 32)
-typedef unsigned long __kernel_nlink_t;
-#endif
#if (_MIPS_SZLONG == 64)
typedef unsigned int __kernel_nlink_t;
+#define __kernel_nlink_t __kernel_nlink_t
#endif
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
-typedef int __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-#if (_MIPS_SZLONG == 32)
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef int __kernel_ptrdiff_t;
-#endif
-#if (_MIPS_SZLONG == 64)
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-#endif
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef long __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
-typedef __kernel_uid_t __kernel_old_uid_t;
-typedef __kernel_gid_t __kernel_old_gid_t;
-typedef unsigned int __kernel_old_dev_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
+typedef long __kernel_daddr_t;
+#define __kernel_daddr_t __kernel_daddr_t
-typedef struct {
#if (_MIPS_SZLONG == 32)
+typedef struct {
long val[2];
-#endif
-#if (_MIPS_SZLONG == 64)
- int val[2];
-#endif
} __kernel_fsid_t;
+#define __kernel_fsid_t __kernel_fsid_t
+#endif
-#if defined(__KERNEL__)
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
-{
- unsigned long *__tmp = __p->fds_bits;
- int __i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 16:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- __tmp[ 8] = 0; __tmp[ 9] = 0;
- __tmp[10] = 0; __tmp[11] = 0;
- __tmp[12] = 0; __tmp[13] = 0;
- __tmp[14] = 0; __tmp[15] = 0;
- return;
-
- case 8:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- return;
-
- case 4:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- return;
- }
- }
- __i = __FDSET_LONGS;
- while (__i) {
- __i--;
- *__tmp = 0;
- __tmp++;
- }
-}
-
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
#endif /* _ASM_POSIX_TYPES_H */
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index 29811f043399..84d0639e4580 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -326,7 +326,7 @@ static void sp_cleanup(void)
i = j * __NFDBITS;
if (i >= fdt->max_fds)
break;
- set = fdt->open_fds->fds_bits[j++];
+ set = fdt->open_fds[j++];
while (set) {
if (set & 1) {
struct file * file = xchg(&fdt->fd[i], NULL);
diff --git a/arch/mn10300/include/asm/posix_types.h b/arch/mn10300/include/asm/posix_types.h
index 56ffbc158798..ab506181ec31 100644
--- a/arch/mn10300/include/asm/posix_types.h
+++ b/arch/mn10300/include/asm/posix_types.h
@@ -17,14 +17,19 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
typedef unsigned short __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
typedef unsigned short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
+#define __kernel_uid_t __kernel_uid_t
+
#if __GNUC__ == 4
typedef unsigned int __kernel_size_t;
typedef signed int __kernel_ssize_t;
@@ -33,105 +38,11 @@ typedef unsigned long __kernel_size_t;
typedef signed long __kernel_ssize_t;
#endif
typedef int __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
+#define __kernel_size_t __kernel_size_t
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
-#if defined(__KERNEL__) || defined(__USE_ALL)
- int val[2];
-#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
- int __val[2];
-#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
-
-#undef __FD_SET
-static inline void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
-}
-
-#undef __FD_CLR
-static inline void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
-}
-
-
-#undef __FD_ISSET
-static inline int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static inline void __FD_ZERO(__kernel_fd_set *__p)
-{
- unsigned long *__tmp = __p->fds_bits;
- int __i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 16:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- __tmp[ 8] = 0; __tmp[ 9] = 0;
- __tmp[10] = 0; __tmp[11] = 0;
- __tmp[12] = 0; __tmp[13] = 0;
- __tmp[14] = 0; __tmp[15] = 0;
- return;
-
- case 8:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- return;
-
- case 4:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- return;
- }
- }
- __i = __FDSET_LONGS;
- while (__i) {
- __i--;
- *__tmp = 0;
- __tmp++;
- }
-}
-
-#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
+#include <asm-generic/posix_types.h>
#endif /* _ASM_POSIX_TYPES_H */
diff --git a/arch/parisc/include/asm/posix_types.h b/arch/parisc/include/asm/posix_types.h
index 00da29a340ba..5212b0357daf 100644
--- a/arch/parisc/include/asm/posix_types.h
+++ b/arch/parisc/include/asm/posix_types.h
@@ -6,123 +6,22 @@
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
+
typedef unsigned short __kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
typedef unsigned short __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
typedef unsigned short __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-typedef int __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-/* Note these change from narrow to wide kernels */
-#ifdef CONFIG_64BIT
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-#else
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef int __kernel_ptrdiff_t;
-#endif
-typedef long __kernel_time_t;
-typedef char * __kernel_caddr_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
+typedef int __kernel_suseconds_t;
+#define __kernel_suseconds_t __kernel_suseconds_t
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
typedef long long __kernel_off64_t;
typedef unsigned long long __kernel_ino64_t;
-#endif
-
-typedef unsigned int __kernel_old_dev_t;
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-/* compatibility stuff */
-typedef __kernel_uid_t __kernel_old_uid_t;
-typedef __kernel_gid_t __kernel_old_gid_t;
-
-#if defined(__KERNEL__)
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
-{
- unsigned long *__tmp = __p->fds_bits;
- int __i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 16:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- __tmp[ 8] = 0; __tmp[ 9] = 0;
- __tmp[10] = 0; __tmp[11] = 0;
- __tmp[12] = 0; __tmp[13] = 0;
- __tmp[14] = 0; __tmp[15] = 0;
- return;
-
- case 8:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- return;
-
- case 4:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- return;
- }
- }
- __i = __FDSET_LONGS;
- while (__i) {
- __i--;
- *__tmp = 0;
- __tmp++;
- }
-}
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
#endif
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d219ebecabf0..feab3bad6d0f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -133,7 +133,6 @@ config PPC
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
select HAVE_GENERIC_HARDIRQS
- select HAVE_SPARSE_IRQ
select SPARSE_IRQ
select IRQ_PER_CPU
select IRQ_DOMAIN
@@ -154,6 +153,7 @@ config COMPAT
bool
default y if PPC64
select COMPAT_BINFMT_ELF
+ select ARCH_WANT_OLD_COMPAT_IPC
config SYSVIPC_COMPAT
bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 72d55dbc6119..e5f26890a69e 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -114,16 +114,6 @@ config DEBUGGER
depends on KGDB || XMON
default y
-config VIRQ_DEBUG
- bool "Expose hardware/virtual IRQ mapping via debugfs"
- depends on DEBUG_FS
- help
- This option will show the mapping relationship between hardware irq
- numbers and virtual irq numbers. The mapping is exposed via debugfs
- in the file powerpc/virq_mapping.
-
- If you don't know what this means you don't need it.
-
config BDI_SWITCH
bool "Include BDI-2000 user context switcher"
depends on DEBUG_KERNEL && PPC32
diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig
index c091aaf7685f..f4337bacd0e7 100644
--- a/arch/powerpc/configs/85xx/p1023rds_defconfig
+++ b/arch/powerpc/configs/85xx/p1023rds_defconfig
@@ -165,7 +165,7 @@ CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_VIRQ_DEBUG=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
index acf7fb280464..f104ccde6b53 100644
--- a/arch/powerpc/configs/chroma_defconfig
+++ b/arch/powerpc/configs/chroma_defconfig
@@ -279,7 +279,7 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_PPC_EMULATED_STATS=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
-CONFIG_VIRQ_DEBUG=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_PPC_EARLY_DEBUG=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_CRYPTO_NULL=m
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 7ed8d4cf2719..82b13bfcf3c0 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -95,7 +95,7 @@ CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_VIRQ_DEBUG=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 5fb0c8a94811..cc87a8441566 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -214,7 +214,7 @@ CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_VIRQ_DEBUG=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index fb51bc90edd2..48d6682f2434 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -216,7 +216,7 @@ CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_VIRQ_DEBUG=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 1acf65026773..c1442a3758ae 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -457,7 +457,7 @@ CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
CONFIG_XMON=y
-CONFIG_VIRQ_DEBUG=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_BOOTX_TEXT=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 30e7d0d20e49..6608232663cb 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -340,7 +340,7 @@ CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
-CONFIG_VIRQ_DEBUG=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
diff --git a/arch/powerpc/include/asm/posix_types.h b/arch/powerpc/include/asm/posix_types.h
index c4e396b540df..f1393252bbda 100644
--- a/arch/powerpc/include/asm/posix_types.h
+++ b/arch/powerpc/include/asm/posix_types.h
@@ -7,122 +7,22 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-typedef long __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef long __kernel_suseconds_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
-typedef unsigned int __kernel_old_uid_t;
-typedef unsigned int __kernel_old_gid_t;
-
#ifdef __powerpc64__
-typedef unsigned long __kernel_nlink_t;
-typedef int __kernel_ipc_pid_t;
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
typedef unsigned long __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
#else
-typedef unsigned short __kernel_nlink_t;
-typedef short __kernel_ipc_pid_t;
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
-typedef unsigned int __kernel_old_dev_t;
-#endif
-
-#ifdef __powerpc64__
-typedef long long __kernel_loff_t;
-#else
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#ifndef __GNUC__
-
-#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
-#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
-#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
-#define __FD_ZERO(set) \
- ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set)))
-
-#else /* __GNUC__ */
-
-#if defined(__KERNEL__)
-/* With GNU C, use inline functions instead so args are evaluated only once: */
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
-{
- unsigned long *tmp = (unsigned long *)p->fds_bits;
- int i;
+typedef long __kernel_ptrdiff_t;
+#define __kernel_size_t __kernel_size_t
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 16:
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+typedef unsigned short __kernel_nlink_t;
+#define __kernel_nlink_t __kernel_nlink_t
- case 8:
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+typedef short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+#endif
- case 4:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- return;
- }
- }
- i = __FDSET_LONGS;
- while (i) {
- i--;
- *tmp = 0;
- tmp++;
- }
-}
+#include <asm-generic/posix_types.h>
-#endif /* defined(__KERNEL__) */
-#endif /* __GNUC__ */
#endif /* _ASM_POWERPC_POSIX_TYPES_H */
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 03c5fce2a5b3..c2c5b078ba80 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -122,7 +122,7 @@ static struct spu_context *coredump_next_context(int *fd)
struct spu_context *ctx = NULL;
for (; *fd < fdt->max_fds; (*fd)++) {
- if (!FD_ISSET(*fd, fdt->open_fds))
+ if (!fd_is_open(*fd, fdt))
continue;
file = fcheck(*fd);
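The fd_is_open() call above replaces the fd_set-based FD_ISSET() test; with open_fds kept as an
unsigned long bitmap rather than an fd_set, the helper is assumed to reduce to a plain bit test
along these lines (sketch of the assumed helper, not code from this commit):

	static inline bool fd_is_open(int fd, const struct fdtable *fdt)
	{
		/* open_fds is the bitmap of currently open descriptors */
		return test_bit(fd, fdt->open_fds);
	}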
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 465d5be1f0f4..2b7c0fbe578e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -219,6 +219,7 @@ config COMPAT
prompt "Kernel support for 31 bit emulation"
depends on 64BIT
select COMPAT_BINFMT_ELF
+ select ARCH_WANT_OLD_COMPAT_IPC
help
Select this option if you want to enable your system kernel to
handle system-calls from ELF binaries for 31 bit ESA. This option
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index e49db5d5d06f..a3afecdae145 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -12,6 +12,8 @@
#ifndef _ASM_S390_CPU_MF_H
#define _ASM_S390_CPU_MF_H
+#include <asm/facility.h>
+
#define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */
#define CPU_MF_INT_SF_ISE (1 << 30) /* incorrect SDBT entry */
#define CPU_MF_INT_SF_PRA (1 << 29) /* program request alert */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 1c7d6ce328bf..6340178748bf 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -1,6 +1,8 @@
#ifndef __MMU_H
#define __MMU_H
+#include <linux/errno.h>
+
typedef struct {
atomic_t attach_count;
unsigned int flush_mm;
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 8cc113f92523..edf8527ff08d 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -3,7 +3,6 @@
*
* S390 version
*
- * Derived from "include/asm-i386/posix_types.h"
*/
#ifndef __ARCH_S390_POSIX_TYPES_H
@@ -15,22 +14,11 @@
* assume GCC is being used.
*/
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
typedef unsigned long __kernel_size_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
+#define __kernel_size_t __kernel_size_t
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
+typedef unsigned short __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
#ifndef __s390x__
@@ -42,11 +30,6 @@ typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
-typedef unsigned short __kernel_old_dev_t;
#else /* __s390x__ */
@@ -59,49 +42,16 @@ typedef unsigned int __kernel_gid_t;
typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
-typedef __kernel_uid_t __kernel_old_uid_t;
-typedef __kernel_gid_t __kernel_old_gid_t;
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
-typedef unsigned short __kernel_old_dev_t;
#endif /* __s390x__ */
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-
-#ifdef __KERNEL__
-
-#undef __FD_SET
-static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static inline int __FD_ISSET(unsigned long fd, const __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- return (fdsetp->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-#undef __FD_ZERO
-#define __FD_ZERO(fdsetp) \
- ((void) memset ((void *) (fdsetp), 0, sizeof (__kernel_fd_set)))
+#define __kernel_ino_t __kernel_ino_t
+#define __kernel_mode_t __kernel_mode_t
+#define __kernel_nlink_t __kernel_nlink_t
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+#define __kernel_uid_t __kernel_uid_t
+#define __kernel_gid_t __kernel_gid_t
-#endif /* __KERNEL__ */
+#include <asm-generic/posix_types.h>
#endif
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index ac39e7a731fc..87f080b17af1 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/slab.h>
+#include <asm/facility.h>
#include <asm/sysinfo.h>
#include <asm/ebcdic.h>
#include <asm/debug.h>
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 8481ecf2ad71..46405086479c 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -18,7 +18,7 @@
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
-#include <asm/system.h>
+#include <asm/ctl_reg.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 609f985198cf..f58f37f66824 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -15,7 +15,6 @@
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/export.h>
-#include <asm/system.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 1581ea2e027a..06264ae8ccd9 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -50,6 +50,7 @@
#include <asm/ipl.h>
#include <asm/uaccess.h>
+#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a8bf9994b086..1f77227669e8 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -32,6 +32,8 @@
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
+#include <asm/switch_to.h>
+#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 713fb58ca507..ff9e033ce626 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -5,6 +5,7 @@ config SUPERH
select HAVE_IDE if HAS_IOPORT
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
select HAVE_OPROFILE
select HAVE_GENERIC_DMA_COHERENT
select HAVE_ARCH_TRACEHOOK
@@ -22,7 +23,7 @@ config SUPERH
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_GENERIC_HARDIRQS
- select HAVE_SPARSE_IRQ
+ select MAY_HAVE_SPARSE_IRQ
select IRQ_FORCED_THREADING
select RTC_LIB
select GENERIC_ATOMIC64
@@ -161,6 +162,9 @@ config NO_IOPORT
config IO_TRAPPED
bool
+config SWAP_IO_SPACE
+ bool
+
config DMA_COHERENT
bool
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index e5ac12b2ce65..d12fe9ddf3da 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -522,11 +522,18 @@ static void sdhi0_set_pwr(struct platform_device *pdev, int state)
gpio_set_value(GPIO_PTB6, state);
}
+static int sdhi0_get_cd(struct platform_device *pdev)
+{
+ return !gpio_get_value(GPIO_PTY7);
+}
+
static struct sh_mobile_sdhi_info sdhi0_info = {
.dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
.set_pwr = sdhi0_set_pwr,
- .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD,
+ .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
+ MMC_CAP_NEEDS_POLL,
+ .get_cd = sdhi0_get_cd,
};
static struct resource sdhi0_resources[] = {
@@ -559,11 +566,18 @@ static void sdhi1_set_pwr(struct platform_device *pdev, int state)
gpio_set_value(GPIO_PTB7, state);
}
+static int sdhi1_get_cd(struct platform_device *pdev)
+{
+ return !gpio_get_value(GPIO_PTW7);
+}
+
static struct sh_mobile_sdhi_info sdhi1_info = {
.dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
- .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD,
+ .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
+ MMC_CAP_NEEDS_POLL,
.set_pwr = sdhi1_set_pwr,
+ .get_cd = sdhi1_get_cd,
};
static struct resource sdhi1_resources[] = {
@@ -1001,6 +1015,7 @@ extern char ecovec24_sdram_leave_end;
static int __init arch_setup(void)
{
struct clk *clk;
+ bool cn12_enabled = false;
/* register board specific self-refresh code */
sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF |
@@ -1201,9 +1216,13 @@ static int __init arch_setup(void)
gpio_direction_input(GPIO_PTR5);
gpio_direction_input(GPIO_PTR6);
+ /* SD-card slot CN11 */
+ /* Card-detect, used on CN11, either with SDHI0 or with SPI */
+ gpio_request(GPIO_PTY7, NULL);
+ gpio_direction_input(GPIO_PTY7);
+
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
/* enable SDHI0 on CN11 (needs DS2.4 set to ON) */
- gpio_request(GPIO_FN_SDHI0CD, NULL);
gpio_request(GPIO_FN_SDHI0WP, NULL);
gpio_request(GPIO_FN_SDHI0CMD, NULL);
gpio_request(GPIO_FN_SDHI0CLK, NULL);
@@ -1213,23 +1232,6 @@ static int __init arch_setup(void)
gpio_request(GPIO_FN_SDHI0D0, NULL);
gpio_request(GPIO_PTB6, NULL);
gpio_direction_output(GPIO_PTB6, 0);
-
-#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
- /* enable SDHI1 on CN12 (needs DS2.6,7 set to ON,OFF) */
- gpio_request(GPIO_FN_SDHI1CD, NULL);
- gpio_request(GPIO_FN_SDHI1WP, NULL);
- gpio_request(GPIO_FN_SDHI1CMD, NULL);
- gpio_request(GPIO_FN_SDHI1CLK, NULL);
- gpio_request(GPIO_FN_SDHI1D3, NULL);
- gpio_request(GPIO_FN_SDHI1D2, NULL);
- gpio_request(GPIO_FN_SDHI1D1, NULL);
- gpio_request(GPIO_FN_SDHI1D0, NULL);
- gpio_request(GPIO_PTB7, NULL);
- gpio_direction_output(GPIO_PTB7, 0);
-
- /* I/O buffer drive ability is high for SDHI1 */
- __raw_writew((__raw_readw(IODRIVEA) & ~0x3000) | 0x2000 , IODRIVEA);
-#endif /* CONFIG_MMC_SH_MMCIF */
#else
/* enable MSIOF0 on CN11 (needs DS2.4 set to OFF) */
gpio_request(GPIO_FN_MSIOF0_TXD, NULL);
@@ -1241,12 +1243,51 @@ static int __init arch_setup(void)
gpio_direction_output(GPIO_PTB6, 0); /* disable power by default */
gpio_request(GPIO_PTY6, NULL); /* write protect */
gpio_direction_input(GPIO_PTY6);
- gpio_request(GPIO_PTY7, NULL); /* card detect */
- gpio_direction_input(GPIO_PTY7);
spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus));
#endif
+ /* MMC/SD-card slot CN12 */
+#if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
+ /* enable MMCIF (needs DS2.6,7 set to OFF,ON) */
+ gpio_request(GPIO_FN_MMC_D7, NULL);
+ gpio_request(GPIO_FN_MMC_D6, NULL);
+ gpio_request(GPIO_FN_MMC_D5, NULL);
+ gpio_request(GPIO_FN_MMC_D4, NULL);
+ gpio_request(GPIO_FN_MMC_D3, NULL);
+ gpio_request(GPIO_FN_MMC_D2, NULL);
+ gpio_request(GPIO_FN_MMC_D1, NULL);
+ gpio_request(GPIO_FN_MMC_D0, NULL);
+ gpio_request(GPIO_FN_MMC_CLK, NULL);
+ gpio_request(GPIO_FN_MMC_CMD, NULL);
+ gpio_request(GPIO_PTB7, NULL);
+ gpio_direction_output(GPIO_PTB7, 0);
+
+ cn12_enabled = true;
+#elif defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
+ /* enable SDHI1 on CN12 (needs DS2.6,7 set to ON,OFF) */
+ gpio_request(GPIO_FN_SDHI1WP, NULL);
+ gpio_request(GPIO_FN_SDHI1CMD, NULL);
+ gpio_request(GPIO_FN_SDHI1CLK, NULL);
+ gpio_request(GPIO_FN_SDHI1D3, NULL);
+ gpio_request(GPIO_FN_SDHI1D2, NULL);
+ gpio_request(GPIO_FN_SDHI1D1, NULL);
+ gpio_request(GPIO_FN_SDHI1D0, NULL);
+ gpio_request(GPIO_PTB7, NULL);
+ gpio_direction_output(GPIO_PTB7, 0);
+
+ /* Card-detect, used on CN12 with SDHI1 */
+ gpio_request(GPIO_PTW7, NULL);
+ gpio_direction_input(GPIO_PTW7);
+
+ cn12_enabled = true;
+#endif
+
+ if (cn12_enabled)
+ /* I/O buffer drive ability is high for CN12 */
+ __raw_writew((__raw_readw(IODRIVEA) & ~0x3000) | 0x2000,
+ IODRIVEA);
+
/* enable Video */
gpio_request(GPIO_PTU2, NULL);
gpio_direction_output(GPIO_PTU2, 1);
@@ -1305,25 +1346,6 @@ static int __init arch_setup(void)
gpio_request(GPIO_PTU5, NULL);
gpio_direction_output(GPIO_PTU5, 0);
-#if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
- /* enable MMCIF (needs DS2.6,7 set to OFF,ON) */
- gpio_request(GPIO_FN_MMC_D7, NULL);
- gpio_request(GPIO_FN_MMC_D6, NULL);
- gpio_request(GPIO_FN_MMC_D5, NULL);
- gpio_request(GPIO_FN_MMC_D4, NULL);
- gpio_request(GPIO_FN_MMC_D3, NULL);
- gpio_request(GPIO_FN_MMC_D2, NULL);
- gpio_request(GPIO_FN_MMC_D1, NULL);
- gpio_request(GPIO_FN_MMC_D0, NULL);
- gpio_request(GPIO_FN_MMC_CLK, NULL);
- gpio_request(GPIO_FN_MMC_CMD, NULL);
- gpio_request(GPIO_PTB7, NULL);
- gpio_direction_output(GPIO_PTB7, 0);
-
- /* I/O buffer drive ability is high for MMCIF */
- __raw_writew((__raw_readw(IODRIVEA) & ~0x3000) | 0x2000 , IODRIVEA);
-#endif
-
/* enable I2C device */
i2c_register_board_info(0, i2c0_devices,
ARRAY_SIZE(i2c0_devices));
diff --git a/arch/sh/drivers/dma/dma-g2.c b/arch/sh/drivers/dma/dma-g2.c
index be9ca7ca0ce4..e1ab6eb3c04b 100644
--- a/arch/sh/drivers/dma/dma-g2.c
+++ b/arch/sh/drivers/dma/dma-g2.c
@@ -181,14 +181,14 @@ static int __init g2_dma_init(void)
ret = register_dmac(&g2_dma_info);
if (unlikely(ret != 0))
- free_irq(HW_EVENT_G2_DMA, 0);
+ free_irq(HW_EVENT_G2_DMA, &g2_dma_info);
return ret;
}
static void __exit g2_dma_exit(void)
{
- free_irq(HW_EVENT_G2_DMA, 0);
+ free_irq(HW_EVENT_G2_DMA, &g2_dma_info);
unregister_dmac(&g2_dma_info);
}
diff --git a/arch/sh/drivers/dma/dmabrg.c b/arch/sh/drivers/dma/dmabrg.c
index 3d66a32ce610..c0dd904483c7 100644
--- a/arch/sh/drivers/dma/dmabrg.c
+++ b/arch/sh/drivers/dma/dmabrg.c
@@ -189,8 +189,8 @@ static int __init dmabrg_init(void)
if (ret == 0)
return ret;
- free_irq(DMABRGI1, 0);
-out1: free_irq(DMABRGI0, 0);
+ free_irq(DMABRGI1, NULL);
+out1: free_irq(DMABRGI0, NULL);
out0: kfree(dmabrg_handlers);
return ret;
}
diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c
index fb8f14990743..5a6dab6e27d9 100644
--- a/arch/sh/drivers/pci/pci-sh7780.c
+++ b/arch/sh/drivers/pci/pci-sh7780.c
@@ -21,6 +21,13 @@
#include <asm/mmu.h>
#include <asm/sizes.h>
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+# define PCICR_ENDIANNESS SH4_PCICR_BSWP
+#else
+# define PCICR_ENDIANNESS 0
+#endif
+
+
static struct resource sh7785_pci_resources[] = {
{
.name = "PCI IO",
@@ -254,7 +261,7 @@ static int __init sh7780_pci_init(void)
__raw_writel(PCIECR_ENBL, PCIECR);
/* Reset */
- __raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_PRST,
+ __raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_PRST | PCICR_ENDIANNESS,
chan->reg_base + SH4_PCICR);
/*
@@ -290,7 +297,8 @@ static int __init sh7780_pci_init(void)
* Now throw it in to register initialization mode and
* start the real work.
*/
- __raw_writel(SH4_PCICR_PREFIX, chan->reg_base + SH4_PCICR);
+ __raw_writel(SH4_PCICR_PREFIX | PCICR_ENDIANNESS,
+ chan->reg_base + SH4_PCICR);
memphys = __pa(memory_start);
memsize = roundup_pow_of_two(memory_end - memory_start);
@@ -380,7 +388,8 @@ static int __init sh7780_pci_init(void)
* Initialization mode complete, release the control register and
* enable round robin mode to stop device overruns/starvation.
*/
- __raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_FTO,
+ __raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_FTO |
+ PCICR_ENDIANNESS,
chan->reg_base + SH4_PCICR);
ret = register_pci_controller(chan);
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 35fc8b077cb1..ec464a6b95fe 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -23,6 +23,7 @@
#define __IO_PREFIX generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
+#include <mach/mangle-port.h>
#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v))
#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
@@ -34,21 +35,15 @@
#define __raw_readl(a) (__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a) (__chk_io_ptr(a), *(volatile u64 __force *)(a))
-#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; })
-#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \
- __raw_readw(c)); __v; })
-#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \
- __raw_readl(c)); __v; })
-#define readq_relaxed(c) ({ u64 __v = le64_to_cpu((__force __le64) \
- __raw_readq(c)); __v; })
-
-#define writeb_relaxed(v,c) ((void)__raw_writeb(v,c))
-#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \
- cpu_to_le16(v),c))
-#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
- cpu_to_le32(v),c))
-#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64) \
- cpu_to_le64(v),c))
+#define readb_relaxed(c) ({ u8 __v = ioswabb(__raw_readb(c)); __v; })
+#define readw_relaxed(c) ({ u16 __v = ioswabw(__raw_readw(c)); __v; })
+#define readl_relaxed(c) ({ u32 __v = ioswabl(__raw_readl(c)); __v; })
+#define readq_relaxed(c) ({ u64 __v = ioswabq(__raw_readq(c)); __v; })
+
+#define writeb_relaxed(v,c) ((void)__raw_writeb((__force u8)ioswabb(v),c))
+#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)ioswabw(v),c))
+#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)ioswabl(v),c))
+#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)ioswabq(v),c))
#define readb(a) ({ u8 r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a) ({ u16 r_ = readw_relaxed(a); rmb(); r_; })
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 45d08b6a5ef7..2a62017eb275 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -21,17 +21,6 @@
#define NO_IRQ_IGNORE ((unsigned int)-1)
/*
- * Convert back and forth between INTEVT and IRQ values.
- */
-#ifdef CONFIG_CPU_HAS_INTEVT
-#define evt2irq(evt) (((evt) >> 5) - 16)
-#define irq2evt(irq) (((irq) + 16) << 5)
-#else
-#define evt2irq(evt) (evt)
-#define irq2evt(irq) (irq)
-#endif
-
-/*
* Simple Mask Register Support
*/
extern void make_maskreg_irq(unsigned int irq);
diff --git a/arch/sh/include/asm/posix_types_32.h b/arch/sh/include/asm/posix_types_32.h
index 6a9ceaaf1aea..abda58467ece 100644
--- a/arch/sh/include/asm/posix_types_32.h
+++ b/arch/sh/include/asm/posix_types_32.h
@@ -12,11 +12,6 @@ typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
#define __kernel_gid_t __kernel_gid_t
-typedef unsigned int __kernel_uid32_t;
-#define __kernel_uid32_t __kernel_uid32_t
-typedef unsigned int __kernel_gid32_t;
-#define __kernel_gid32_t __kernel_gid32_t
-
typedef unsigned short __kernel_old_uid_t;
#define __kernel_old_uid_t __kernel_old_uid_t
typedef unsigned short __kernel_old_gid_t;
diff --git a/arch/sh/include/asm/posix_types_64.h b/arch/sh/include/asm/posix_types_64.h
index 8cd11485c06b..fcda07b4a616 100644
--- a/arch/sh/include/asm/posix_types_64.h
+++ b/arch/sh/include/asm/posix_types_64.h
@@ -17,10 +17,6 @@ typedef int __kernel_ssize_t;
#define __kernel_ssize_t __kernel_ssize_t
typedef int __kernel_ptrdiff_t;
#define __kernel_ptrdiff_t __kernel_ptrdiff_t
-typedef unsigned int __kernel_uid32_t;
-#define __kernel_uid32_t __kernel_uid32_t
-typedef unsigned int __kernel_gid32_t;
-#define __kernel_gid32_t __kernel_gid32_t
typedef unsigned short __kernel_old_uid_t;
#define __kernel_old_uid_t __kernel_old_uid_t
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index 65be656ead7d..a42a5610a36a 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -1,9 +1,46 @@
#ifdef __KERNEL__
# ifdef CONFIG_SUPERH32
+
# include "unistd_32.h"
+# define __ARCH_WANT_SYS_RT_SIGSUSPEND
+
# else
# include "unistd_64.h"
# endif
+
+# define __ARCH_WANT_IPC_PARSE_VERSION
+# define __ARCH_WANT_OLD_READDIR
+# define __ARCH_WANT_OLD_STAT
+# define __ARCH_WANT_STAT64
+# define __ARCH_WANT_SYS_ALARM
+# define __ARCH_WANT_SYS_GETHOSTNAME
+# define __ARCH_WANT_SYS_IPC
+# define __ARCH_WANT_SYS_PAUSE
+# define __ARCH_WANT_SYS_SGETMASK
+# define __ARCH_WANT_SYS_SIGNAL
+# define __ARCH_WANT_SYS_TIME
+# define __ARCH_WANT_SYS_UTIME
+# define __ARCH_WANT_SYS_WAITPID
+# define __ARCH_WANT_SYS_SOCKETCALL
+# define __ARCH_WANT_SYS_FADVISE64
+# define __ARCH_WANT_SYS_GETPGRP
+# define __ARCH_WANT_SYS_LLSEEK
+# define __ARCH_WANT_SYS_NICE
+# define __ARCH_WANT_SYS_OLD_GETRLIMIT
+# define __ARCH_WANT_SYS_OLD_UNAME
+# define __ARCH_WANT_SYS_OLDUMOUNT
+# define __ARCH_WANT_SYS_SIGPENDING
+# define __ARCH_WANT_SYS_SIGPROCMASK
+# define __ARCH_WANT_SYS_RT_SIGACTION
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+# define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+
#else
# ifdef __SH5__
# include "unistd_64.h"
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index 152b8627a184..72fd1e061006 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_SH_UNISTD_H
-#define __ASM_SH_UNISTD_H
+#ifndef __ASM_SH_UNISTD_32_H
+#define __ASM_SH_UNISTD_32_H
/*
* Copyright (C) 1999 Niibe Yutaka
@@ -26,7 +26,7 @@
#define __NR_mknod 14
#define __NR_chmod 15
#define __NR_lchown 16
-#define __NR_break 17
+ /* 17 was sys_break */
#define __NR_oldstat 18
#define __NR_lseek 19
#define __NR_getpid 20
@@ -40,11 +40,11 @@
#define __NR_oldfstat 28
#define __NR_pause 29
#define __NR_utime 30
-#define __NR_stty 31
-#define __NR_gtty 32
+ /* 31 was sys_stty */
+ /* 32 was sys_gtty */
#define __NR_access 33
#define __NR_nice 34
-#define __NR_ftime 35
+ /* 35 was sys_ftime */
#define __NR_sync 36
#define __NR_kill 37
#define __NR_rename 38
@@ -53,7 +53,7 @@
#define __NR_dup 41
#define __NR_pipe 42
#define __NR_times 43
-#define __NR_prof 44
+ /* 44 was sys_prof */
#define __NR_brk 45
#define __NR_setgid 46
#define __NR_getgid 47
@@ -62,13 +62,13 @@
#define __NR_getegid 50
#define __NR_acct 51
#define __NR_umount2 52
-#define __NR_lock 53
+ /* 53 was sys_lock */
#define __NR_ioctl 54
#define __NR_fcntl 55
-#define __NR_mpx 56
+ /* 56 was sys_mpx */
#define __NR_setpgid 57
-#define __NR_ulimit 58
-#define __NR_oldolduname 59
+ /* 58 was sys_ulimit */
+ /* 59 was sys_olduname */
#define __NR_umask 60
#define __NR_chroot 61
#define __NR_ustat 62
@@ -91,7 +91,7 @@
#define __NR_settimeofday 79
#define __NR_getgroups 80
#define __NR_setgroups 81
-#define __NR_select 82
+ /* 82 was sys_oldselect */
#define __NR_symlink 83
#define __NR_oldlstat 84
#define __NR_readlink 85
@@ -107,10 +107,10 @@
#define __NR_fchown 95
#define __NR_getpriority 96
#define __NR_setpriority 97
-#define __NR_profil 98
+ /* 98 was sys_profil */
#define __NR_statfs 99
#define __NR_fstatfs 100
-#define __NR_ioperm 101
+ /* 101 was sys_ioperm */
#define __NR_socketcall 102
#define __NR_syslog 103
#define __NR_setitimer 104
@@ -119,10 +119,10 @@
#define __NR_lstat 107
#define __NR_fstat 108
#define __NR_olduname 109
-#define __NR_iopl 110
+ /* 110 was sys_iopl */
#define __NR_vhangup 111
-#define __NR_idle 112
-#define __NR_vm86old 113
+ /* 112 was sys_idle */
+ /* 113 was sys_vm86old */
#define __NR_wait4 114
#define __NR_swapoff 115
#define __NR_sysinfo 116
@@ -136,17 +136,17 @@
#define __NR_adjtimex 124
#define __NR_mprotect 125
#define __NR_sigprocmask 126
-#define __NR_create_module 127
+ /* 127 was sys_create_module */
#define __NR_init_module 128
#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
+ /* 130 was sys_get_kernel_syms */
#define __NR_quotactl 131
#define __NR_getpgid 132
#define __NR_fchdir 133
#define __NR_bdflush 134
#define __NR_sysfs 135
#define __NR_personality 136
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+ /* 137 was sys_afs_syscall */
#define __NR_setfsuid 138
#define __NR_setfsgid 139
#define __NR__llseek 140
@@ -175,8 +175,8 @@
#define __NR_mremap 163
#define __NR_setresuid 164
#define __NR_getresuid 165
-#define __NR_vm86 166
-#define __NR_query_module 167
+ /* 166 was sys_vm86 */
+ /* 167 was sys_query_module */
#define __NR_poll 168
#define __NR_nfsservctl 169
#define __NR_setresgid 170
@@ -197,8 +197,8 @@
#define __NR_capset 185
#define __NR_sigaltstack 186
#define __NR_sendfile 187
-#define __NR_streams1 188 /* some people actually want it */
-#define __NR_streams2 189 /* some people actually want it */
+ /* 188 reserved for sys_getpmsg */
+ /* 189 reserved for sys_putpmsg */
#define __NR_vfork 190
#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
#define __NR_mmap2 192
@@ -231,7 +231,8 @@
#define __NR_madvise 219
#define __NR_getdents64 220
#define __NR_fcntl64 221
-/* 223 is unused */
+ /* 222 is reserved for tux */
+ /* 223 is unused */
#define __NR_gettid 224
#define __NR_readahead 225
#define __NR_setxattr 226
@@ -251,15 +252,15 @@
#define __NR_futex 240
#define __NR_sched_setaffinity 241
#define __NR_sched_getaffinity 242
-#define __NR_set_thread_area 243
-#define __NR_get_thread_area 244
+ /* 243 is reserved for set_thread_area */
+ /* 244 is reserved for get_thread_area */
#define __NR_io_setup 245
#define __NR_io_destroy 246
#define __NR_io_getevents 247
#define __NR_io_submit 248
#define __NR_io_cancel 249
#define __NR_fadvise64 250
-
+ /* 251 is unused */
#define __NR_exit_group 252
#define __NR_lookup_dcookie 253
#define __NR_epoll_create 254
@@ -281,7 +282,7 @@
#define __NR_tgkill 270
#define __NR_utimes 271
#define __NR_fadvise64_64 272
-#define __NR_vserver 273
+ /* 273 is reserved for vserver */
#define __NR_mbind 274
#define __NR_get_mempolicy 275
#define __NR_set_mempolicy 276
@@ -301,7 +302,7 @@
#define __NR_inotify_init 290
#define __NR_inotify_add_watch 291
#define __NR_inotify_rm_watch 292
-/* 293 is unused */
+ /* 293 is unused */
#define __NR_migrate_pages 294
#define __NR_openat 295
#define __NR_mkdirat 296
@@ -380,43 +381,4 @@
#define NR_syscalls 367
-#ifdef __KERNEL__
-
-#define __ARCH_WANT_IPC_PARSE_VERSION
-#define __ARCH_WANT_OLD_READDIR
-#define __ARCH_WANT_OLD_STAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_IPC
-#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
-#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_WAITPID
-#define __ARCH_WANT_SYS_SOCKETCALL
-#define __ARCH_WANT_SYS_FADVISE64
-#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
-#define __ARCH_WANT_SYS_OLD_UNAME
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#ifndef cond_syscall
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-#endif
-
-#endif /* __KERNEL__ */
-#endif /* __ASM_SH_UNISTD_H */
+#endif /* __ASM_SH_UNISTD_32_H */
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index c330c23db5a0..a28edc329692 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -31,7 +31,7 @@
#define __NR_mknod 14
#define __NR_chmod 15
#define __NR_lchown 16
-#define __NR_break 17
+ /* 17 was sys_break */
#define __NR_oldstat 18
#define __NR_lseek 19
#define __NR_getpid 20
@@ -45,11 +45,11 @@
#define __NR_oldfstat 28
#define __NR_pause 29
#define __NR_utime 30
-#define __NR_stty 31
-#define __NR_gtty 32
+ /* 31 was sys_stty */
+ /* 32 was sys_gtty */
#define __NR_access 33
#define __NR_nice 34
-#define __NR_ftime 35
+ /* 35 was sys_ftime */
#define __NR_sync 36
#define __NR_kill 37
#define __NR_rename 38
@@ -58,7 +58,7 @@
#define __NR_dup 41
#define __NR_pipe 42
#define __NR_times 43
-#define __NR_prof 44
+ /* 44 was sys_prof */
#define __NR_brk 45
#define __NR_setgid 46
#define __NR_getgid 47
@@ -67,13 +67,13 @@
#define __NR_getegid 50
#define __NR_acct 51
#define __NR_umount2 52
-#define __NR_lock 53
+ /* 53 was sys_lock */
#define __NR_ioctl 54
#define __NR_fcntl 55
-#define __NR_mpx 56
+ /* 56 was sys_mpx */
#define __NR_setpgid 57
-#define __NR_ulimit 58
-#define __NR_oldolduname 59
+ /* 58 was sys_ulimit */
+ /* 59 was sys_olduname */
#define __NR_umask 60
#define __NR_chroot 61
#define __NR_ustat 62
@@ -96,7 +96,7 @@
#define __NR_settimeofday 79
#define __NR_getgroups 80
#define __NR_setgroups 81
-#define __NR_select 82
+ /* 82 was sys_select */
#define __NR_symlink 83
#define __NR_oldlstat 84
#define __NR_readlink 85
@@ -112,10 +112,10 @@
#define __NR_fchown 95
#define __NR_getpriority 96
#define __NR_setpriority 97
-#define __NR_profil 98
+ /* 98 was sys_profil */
#define __NR_statfs 99
#define __NR_fstatfs 100
-#define __NR_ioperm 101
+ /* 101 was sys_ioperm */
#define __NR_socketcall 102 /* old implementation of socket systemcall */
#define __NR_syslog 103
#define __NR_setitimer 104
@@ -124,10 +124,10 @@
#define __NR_lstat 107
#define __NR_fstat 108
#define __NR_olduname 109
-#define __NR_iopl 110
+ /* 110 was sys_iopl */
#define __NR_vhangup 111
-#define __NR_idle 112
-#define __NR_vm86old 113
+ /* 112 was sys_idle */
+ /* 113 was sys_vm86old */
#define __NR_wait4 114
#define __NR_swapoff 115
#define __NR_sysinfo 116
@@ -141,17 +141,17 @@
#define __NR_adjtimex 124
#define __NR_mprotect 125
#define __NR_sigprocmask 126
-#define __NR_create_module 127
+ /* 127 was sys_create_module */
#define __NR_init_module 128
#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
+ /* 130 was sys_get_kernel_syms */
#define __NR_quotactl 131
#define __NR_getpgid 132
#define __NR_fchdir 133
#define __NR_bdflush 134
#define __NR_sysfs 135
#define __NR_personality 136
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+ /* 137 was sys_afs_syscall */
#define __NR_setfsuid 138
#define __NR_setfsgid 139
#define __NR__llseek 140
@@ -180,8 +180,8 @@
#define __NR_mremap 163
#define __NR_setresuid 164
#define __NR_getresuid 165
-#define __NR_vm86 166
-#define __NR_query_module 167
+ /* 166 was sys_vm86 */
+ /* 167 was sys_query_module */
#define __NR_poll 168
#define __NR_nfsservctl 169
#define __NR_setresgid 170
@@ -202,8 +202,8 @@
#define __NR_capset 185
#define __NR_sigaltstack 186
#define __NR_sendfile 187
-#define __NR_streams1 188 /* some people actually want it */
-#define __NR_streams2 189 /* some people actually want it */
+ /* 188 reserved for getpmsg */
+ /* 189 reserved for putpmsg */
#define __NR_vfork 190
#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
#define __NR_mmap2 192
@@ -262,16 +262,15 @@
#define __NR_msgrcv 241
#define __NR_msgget 242
#define __NR_msgctl 243
-#if 0
-#define __NR_shmatcall 244
-#endif
+#define __NR_shmat 244
#define __NR_shmdt 245
#define __NR_shmget 246
#define __NR_shmctl 247
#define __NR_getdents64 248
#define __NR_fcntl64 249
-/* 223 is unused */
+ /* 250 is reserved for tux */
+ /* 251 is unused */
#define __NR_gettid 252
#define __NR_readahead 253
#define __NR_setxattr 254
@@ -291,14 +290,15 @@
#define __NR_futex 268
#define __NR_sched_setaffinity 269
#define __NR_sched_getaffinity 270
-#define __NR_set_thread_area 271
-#define __NR_get_thread_area 272
+ /* 271 is reserved for set_thread_area */
+ /* 272 is reserved for get_thread_area */
#define __NR_io_setup 273
#define __NR_io_destroy 274
#define __NR_io_getevents 275
#define __NR_io_submit 276
#define __NR_io_cancel 277
#define __NR_fadvise64 278
+ /* 279 is unused */
#define __NR_exit_group 280
#define __NR_lookup_dcookie 281
@@ -321,17 +321,17 @@
#define __NR_tgkill 298
#define __NR_utimes 299
#define __NR_fadvise64_64 300
-#define __NR_vserver 301
-#define __NR_mbind 302
-#define __NR_get_mempolicy 303
-#define __NR_set_mempolicy 304
+ /* 301 is reserved for vserver */
+ /* 302 is reserved for mbind */
+ /* 303 is reserved for get_mempolicy */
+ /* 304 is reserved for set_mempolicy */
#define __NR_mq_open 305
#define __NR_mq_unlink (__NR_mq_open+1)
#define __NR_mq_timedsend (__NR_mq_open+2)
#define __NR_mq_timedreceive (__NR_mq_open+3)
#define __NR_mq_notify (__NR_mq_open+4)
#define __NR_mq_getsetattr (__NR_mq_open+5)
-#define __NR_kexec_load 311
+ /* 311 is reserved for kexec */
#define __NR_waitid 312
#define __NR_add_key 313
#define __NR_request_key 314
@@ -341,7 +341,7 @@
#define __NR_inotify_init 318
#define __NR_inotify_add_watch 319
#define __NR_inotify_rm_watch 320
-/* 321 is unused */
+ /* 321 is unused */
#define __NR_migrate_pages 322
#define __NR_openat 323
#define __NR_mkdirat 324
@@ -399,44 +399,6 @@
#define __NR_process_vm_readv 376
#define __NR_process_vm_writev 377
-#ifdef __KERNEL__
-
#define NR_syscalls 378
-#define __ARCH_WANT_IPC_PARSE_VERSION
-#define __ARCH_WANT_OLD_READDIR
-#define __ARCH_WANT_OLD_STAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_IPC
-#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
-#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_WAITPID
-#define __ARCH_WANT_SYS_SOCKETCALL
-#define __ARCH_WANT_SYS_FADVISE64
-#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
-#define __ARCH_WANT_SYS_OLD_UNAME
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#ifndef cond_syscall
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-#endif
-
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_UNISTD_64_H */
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-register.h b/arch/sh/include/cpu-sh4/cpu/dma-register.h
index 18fa80aba15e..02788b6a03b7 100644
--- a/arch/sh/include/cpu-sh4/cpu/dma-register.h
+++ b/arch/sh/include/cpu-sh4/cpu/dma-register.h
@@ -16,45 +16,29 @@
#define DMAOR_INIT DMAOR_DME
-#if defined(CONFIG_CPU_SUBTYPE_SH7343) || \
- defined(CONFIG_CPU_SUBTYPE_SH7730)
+#if defined(CONFIG_CPU_SUBTYPE_SH7343)
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0
#define CHCR_TS_HIGH_SHIFT 0
#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7723) || \
defined(CONFIG_CPU_SUBTYPE_SH7724) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7730) || \
defined(CONFIG_CPU_SUBTYPE_SH7786)
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0x00300000
#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
- defined(CONFIG_CPU_SUBTYPE_SH7764)
-#define CHCR_TS_LOW_MASK 0x00000018
-#define CHCR_TS_LOW_SHIFT 3
-#define CHCR_TS_HIGH_MASK 0
-#define CHCR_TS_HIGH_SHIFT 0
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
-#define CHCR_TS_LOW_MASK 0x00000018
-#define CHCR_TS_LOW_SHIFT 3
-#define CHCR_TS_HIGH_MASK 0
-#define CHCR_TS_HIGH_SHIFT 0
-#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7763) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7764) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7785)
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0x00100000
#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
-#define CHCR_TS_LOW_MASK 0x00000018
-#define CHCR_TS_LOW_SHIFT 3
-#define CHCR_TS_HIGH_MASK 0
-#define CHCR_TS_HIGH_SHIFT 0
-#else /* SH7785 */
-#define CHCR_TS_LOW_MASK 0x00000018
-#define CHCR_TS_LOW_SHIFT 3
-#define CHCR_TS_HIGH_MASK 0
-#define CHCR_TS_HIGH_SHIFT 0
#endif
/* Transmit sizes and respective CHCR register values */
diff --git a/arch/sh/include/mach-common/mach/mangle-port.h b/arch/sh/include/mach-common/mach/mangle-port.h
new file mode 100644
index 000000000000..4ca1769a0f12
--- /dev/null
+++ b/arch/sh/include/mach-common/mach/mangle-port.h
@@ -0,0 +1,49 @@
+/*
+ * SH version cribbed from the MIPS copy:
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ */
+#ifndef __MACH_COMMON_MANGLE_PORT_H
+#define __MACH_COMMON_MANGLE_PORT_H
+
+/*
+ * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
+ * less sane hardware forces software to fiddle with this...
+ *
+ * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
+ * you can't have the numerical value of data and byte addresses within
+ * multibyte quantities both preserved at the same time. Hence two
+ * variations of functions: non-prefixed ones that preserve the value
+ * and prefixed ones that preserve byte addresses. The latters are
+ * typically used for moving raw data between a peripheral and memory (cf.
+ * string I/O functions), hence the "__mem_" prefix.
+ */
+#if defined(CONFIG_SWAP_IO_SPACE)
+
+# define ioswabb(x) (x)
+# define __mem_ioswabb(x) (x)
+# define ioswabw(x) le16_to_cpu(x)
+# define __mem_ioswabw(x) (x)
+# define ioswabl(x) le32_to_cpu(x)
+# define __mem_ioswabl(x) (x)
+# define ioswabq(x) le64_to_cpu(x)
+# define __mem_ioswabq(x) (x)
+
+#else
+
+# define ioswabb(x) (x)
+# define __mem_ioswabb(x) (x)
+# define ioswabw(x) (x)
+# define __mem_ioswabw(x) cpu_to_le16(x)
+# define ioswabl(x) (x)
+# define __mem_ioswabl(x) cpu_to_le32(x)
+# define ioswabq(x) (x)
+# define __mem_ioswabq(x) cpu_to_le32(x)
+
+#endif
+
+#endif /* __MACH_COMMON_MANGLE_PORT_H */
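Taken together with the io.h change above, the relaxed accessors now funnel through these hooks.
For a read, the same macro expands differently under the two configurations (sketch of the
preprocessor result, ignoring the statement-expression wrapper; addr is a placeholder):

	/* readw_relaxed(addr) with CONFIG_SWAP_IO_SPACE=y: value-preserving swap */
	le16_to_cpu(__raw_readw(addr))

	/* readw_relaxed(addr) without CONFIG_SWAP_IO_SPACE: identity mapping */
	__raw_readw(addr)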
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 2875e8be4f72..c8836cffa216 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -680,6 +680,25 @@ static struct platform_device spi1_device = {
.resource = spi1_resources,
};
+static struct resource rspi_resources[] = {
+ {
+ .start = 0xfe480000,
+ .end = 0xfe4800ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 220,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rspi_device = {
+ .name = "rspi",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(rspi_resources),
+ .resource = rspi_resources,
+};
+
static struct resource usb_ehci_resources[] = {
[0] = {
.start = 0xfe4f1000,
@@ -740,6 +759,7 @@ static struct platform_device *sh7757_devices[] __initdata = {
&dma3_device,
&spi0_device,
&spi1_device,
+ &rspi_device,
&usb_ehci_device,
&usb_ohci_device,
};
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 6d62eb40e750..1ddc876d3b26 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -29,7 +29,6 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev,
int index)
{
unsigned long allowed_mode = SUSP_SH_SLEEP;
- ktime_t before, after;
int requested_state = index;
int allowed_state;
int k;
@@ -47,19 +46,16 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev,
*/
k = min_t(int, allowed_state, requested_state);
- before = ktime_get();
sh_mobile_call_standby(cpuidle_mode[k]);
- after = ktime_get();
-
- dev->last_residency = (int)ktime_to_ns(ktime_sub(after, before)) >> 10;
return k;
}
static struct cpuidle_device cpuidle_dev;
static struct cpuidle_driver cpuidle_driver = {
- .name = "sh_idle",
- .owner = THIS_MODULE,
+ .name = "sh_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
};
void sh_mobile_setup_cpuidle(void)
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index 0fffacea6ed9..e68b45b6f3f9 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -3,7 +3,7 @@
*
* cpufreq driver for the SuperH processors.
*
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2012 Paul Mundt
* Copyright (C) 2002 M. R. Brown
*
* Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c
@@ -14,6 +14,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+#define pr_fmt(fmt) "cpufreq: " fmt
+
#include <linux/types.h>
#include <linux/cpufreq.h>
#include <linux/kernel.h>
@@ -21,15 +23,18 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/cpumask.h>
+#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/sched.h> /* set_cpus_allowed() */
#include <linux/clk.h>
+#include <linux/percpu.h>
+#include <linux/sh_clk.h>
-static struct clk *cpuclk;
+static DEFINE_PER_CPU(struct clk, sh_cpuclk);
static unsigned int sh_cpufreq_get(unsigned int cpu)
{
- return (clk_get_rate(cpuclk) + 500) / 1000;
+ return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
}
/*
@@ -40,8 +45,10 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
unsigned int relation)
{
unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
cpumask_t cpus_allowed;
struct cpufreq_freqs freqs;
+ struct device *dev;
long freq;
if (!cpu_online(cpu))
@@ -52,13 +59,15 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
BUG_ON(smp_processor_id() != cpu);
+ dev = get_cpu_device(cpu);
+
/* Convert target_freq from kHz to Hz */
freq = clk_round_rate(cpuclk, target_freq * 1000);
if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
return -EINVAL;
- pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
+ dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
freqs.cpu = cpu;
freqs.old = sh_cpufreq_get(cpu);
@@ -70,78 +79,112 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
clk_set_rate(cpuclk, freq);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- pr_debug("cpufreq: set frequency %lu Hz\n", freq);
+ dev_dbg(dev, "set frequency %lu Hz\n", freq);
+
+ return 0;
+}
+
+static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
+ struct cpufreq_frequency_table *freq_table;
+
+ freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
+ if (freq_table)
+ return cpufreq_frequency_table_verify(policy, freq_table);
+
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
return 0;
}
static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- if (!cpu_online(policy->cpu))
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+ struct cpufreq_frequency_table *freq_table;
+ struct device *dev;
+
+ if (!cpu_online(cpu))
return -ENODEV;
- cpuclk = clk_get(NULL, "cpu_clk");
+ dev = get_cpu_device(cpu);
+
+ cpuclk = clk_get(dev, "cpu_clk");
if (IS_ERR(cpuclk)) {
- printk(KERN_ERR "cpufreq: couldn't get CPU#%d clk\n",
- policy->cpu);
+ dev_err(dev, "couldn't get CPU clk\n");
return PTR_ERR(cpuclk);
}
- /* cpuinfo and default policy values */
- policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
- policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->cur = policy->min = policy->max = sh_cpufreq_get(cpu);
- policy->cur = sh_cpufreq_get(policy->cpu);
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
+ freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
+ if (freq_table) {
+ int result;
- /*
- * Catch the cases where the clock framework hasn't been wired up
- * properly to support scaling.
- */
- if (unlikely(policy->min == policy->max)) {
- printk(KERN_ERR "cpufreq: clock framework rate rounding "
- "not supported on CPU#%d.\n", policy->cpu);
+ result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (!result)
+ cpufreq_frequency_table_get_attr(freq_table, cpu);
+ } else {
+ dev_notice(dev, "no frequency table found, falling back "
+ "to rate rounding.\n");
- clk_put(cpuclk);
- return -EINVAL;
+ policy->cpuinfo.min_freq =
+ (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ policy->cpuinfo.max_freq =
+ (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
}
- printk(KERN_INFO "cpufreq: CPU#%d Frequencies - Minimum %u.%03u MHz, "
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+
+ dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
"Maximum %u.%03u MHz.\n",
- policy->cpu, policy->min / 1000, policy->min % 1000,
+ policy->min / 1000, policy->min % 1000,
policy->max / 1000, policy->max % 1000);
return 0;
}
-static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
- return 0;
-}
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
-static int sh_cpufreq_exit(struct cpufreq_policy *policy)
-{
+ cpufreq_frequency_table_put_attr(cpu);
clk_put(cpuclk);
+
return 0;
}
+static struct freq_attr *sh_freq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
static struct cpufreq_driver sh_cpufreq_driver = {
.owner = THIS_MODULE,
.name = "sh",
- .init = sh_cpufreq_cpu_init,
- .verify = sh_cpufreq_verify,
- .target = sh_cpufreq_target,
.get = sh_cpufreq_get,
- .exit = sh_cpufreq_exit,
+ .target = sh_cpufreq_target,
+ .verify = sh_cpufreq_verify,
+ .init = sh_cpufreq_cpu_init,
+ .exit = sh_cpufreq_cpu_exit,
+ .attr = sh_freq_attr,
};
static int __init sh_cpufreq_module_init(void)
{
- printk(KERN_INFO "cpufreq: SuperH CPU frequency driver.\n");
+ pr_notice("SuperH CPU frequency driver.\n");
return cpufreq_register_driver(&sh_cpufreq_driver);
}
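sh_cpufreq_get() keeps the existing Hz-to-kHz conversion, now applied to the per-CPU clock; the
+500 rounds to the nearest kHz instead of truncating (numbers below are purely illustrative):

	/*
	 * Example: cpu_clk at 266,666,666 Hz
	 *   (266666666 + 500) / 1000 = 266667 kHz   (round to nearest)
	 * plain truncation would report 266666 kHz.
	 */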
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 0bc58866add1..5901fba3176e 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -57,12 +57,13 @@ sys_sigsuspend(old_sigset_t mask,
unsigned long r5, unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
+ sigset_t blocked;
+
current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+
+ mask &= _BLOCKABLE;
+ siginitset(&blocked, mask);
+ set_current_blocked(&blocked);
current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -239,11 +240,7 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->sc, &r0))
goto badframe;
@@ -273,10 +270,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
goto badframe;
@@ -547,17 +541,8 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
else
ret = setup_frame(sig, ka, oldset, regs);
- if (ka->sa.sa_flags & SA_ONESHOT)
- ka->sa.sa_handler = SIG_DFL;
-
- if (ret == 0) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ if (ret == 0)
+ block_sigmask(ka, sig);
return ret;
}
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 6b5603fe274b..3c9a6f7dcdce 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -159,14 +159,13 @@ sys_sigsuspend(old_sigset_t mask,
unsigned long r6, unsigned long r7,
struct pt_regs * regs)
{
- sigset_t saveset;
+ sigset_t saveset, blocked;
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+
+ mask &= _BLOCKABLE;
+ siginitset(&blocked, mask);
+ set_current_blocked(&blocked);
REF_REG_RET = -EINTR;
while (1) {
@@ -198,11 +197,8 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&newset);
REF_REG_RET = -EINTR;
while (1) {
@@ -408,11 +404,7 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->sc, &ret))
goto badframe;
@@ -445,10 +437,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
goto badframe;
@@ -734,17 +723,8 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
else
ret = setup_frame(sig, ka, oldset, regs);
- if (ka->sa.sa_flags & SA_ONESHOT)
- ka->sa.sa_handler = SIG_DFL;
-
- if (ret == 0) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ if (ret == 0)
+ block_sigmask(ka, sig);
return ret;
}
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index ee56a9b1a981..4b68f0f79761 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -204,8 +204,8 @@ ENTRY(sys_call_table)
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
- .long sys_ni_syscall /* streams1 */
- .long sys_ni_syscall /* streams2 */
+ .long sys_ni_syscall /* getpmsg */
+ .long sys_ni_syscall /* putpmsg */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
@@ -259,8 +259,8 @@ ENTRY(sys_call_table)
.long sys_futex /* 240 */
.long sys_sched_setaffinity
.long sys_sched_getaffinity
- .long sys_ni_syscall
- .long sys_ni_syscall
+ .long sys_ni_syscall /* reserved for set_thread_area */
+ .long sys_ni_syscall /* reserved for get_thread_area */
.long sys_io_setup /* 245 */
.long sys_io_destroy
.long sys_io_getevents
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 9af7de26fb71..0956345b36ef 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -208,8 +208,8 @@ sys_call_table:
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
- .long sys_ni_syscall /* streams1 */
- .long sys_ni_syscall /* streams2 */
+ .long sys_ni_syscall /* getpmsg */
+ .long sys_ni_syscall /* putpmsg */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
@@ -296,8 +296,8 @@ sys_call_table:
.long sys_futex
.long sys_sched_setaffinity
.long sys_sched_getaffinity /* 270 */
- .long sys_ni_syscall
- .long sys_ni_syscall
+ .long sys_ni_syscall /* reserved for set_thread_area */
+ .long sys_ni_syscall /* reserved for get_thread_area */
.long sys_io_setup
.long sys_io_destroy
.long sys_io_getevents /* 275 */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 1666de84d477..6c0683d3fcba 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -577,6 +577,7 @@ config COMPAT
depends on SPARC64
default y
select COMPAT_BINFMT_ELF
+ select ARCH_WANT_OLD_COMPAT_IPC
config SYSVIPC_COMPAT
bool
diff --git a/arch/sparc/include/asm/posix_types.h b/arch/sparc/include/asm/posix_types.h
index dbfc1a34b3a2..3070f25ae90a 100644
--- a/arch/sparc/include/asm/posix_types.h
+++ b/arch/sparc/include/asm/posix_types.h
@@ -9,35 +9,16 @@
#if defined(__sparc__) && defined(__arch64__)
/* sparc 64 bit */
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_pid_t;
-typedef int __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-typedef unsigned long __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
typedef unsigned int __kernel_nlink_t;
-typedef int __kernel_daddr_t;
-typedef long __kernel_off_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_timer_t;
+#define __kernel_nlink_t __kernel_nlink_t
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
-
-typedef unsigned int __kernel_old_dev_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
/* Note this piece of asymmetry from the v9 ABI. */
typedef int __kernel_suseconds_t;
+#define __kernel_suseconds_t __kernel_suseconds_t
#else
/* sparc 32 bit */
@@ -45,109 +26,29 @@ typedef int __kernel_suseconds_t;
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef long int __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_pid_t;
+#define __kernel_size_t __kernel_size_t
+
typedef unsigned short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef unsigned long __kernel_ino_t;
+#define __kernel_uid_t __kernel_uid_t
+
typedef unsigned short __kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
typedef short __kernel_nlink_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
typedef long __kernel_daddr_t;
-typedef long __kernel_off_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
+#define __kernel_daddr_t __kernel_daddr_t
+
typedef unsigned short __kernel_old_dev_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_timer_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
#endif /* defined(__sparc__) && defined(__arch64__) */
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#ifdef __KERNEL__
-
-#undef __FD_SET
-static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant cases (8 or 32 longs,
- * for 256 and 1024-bit fd_sets respectively)
- */
-#undef __FD_ZERO
-static inline void __FD_ZERO(__kernel_fd_set *p)
-{
- unsigned long *tmp = p->fds_bits;
- int i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 32:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
- tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
- tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
- tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
- return;
- case 16:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- return;
- case 8:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- return;
- case 4:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- return;
- }
- }
- i = __FDSET_LONGS;
- while (i) {
- i--;
- *tmp = 0;
- tmp++;
- }
-}
+#include <asm-generic/posix_types.h>
-#endif /* __KERNEL__ */
#endif /* __SPARC_POSIX_TYPES_H */
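
The typedef-plus-self-#define pairs above are what let <asm-generic/posix_types.h> supply defaults only for the types sparc does not override. A minimal sketch of that guard mechanism, using a made-up type name (the concrete defaults live in the generic header):

	/* In the architecture header, before including the generic one: keep the
	 * arch-specific width and define the name as a macro so the generic
	 * default is skipped. */
	typedef unsigned short example_arch_type_t;        /* arch-specific width */
	#define example_arch_type_t example_arch_type_t    /* marks it as already defined */

	/* Shape of the corresponding guard in asm-generic/posix_types.h
	 * (illustrative, not the real contents): */
	#ifndef example_arch_type_t
	typedef unsigned int example_arch_type_t;          /* generic fallback */
	#endif
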
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index ef8c7c068f53..9835adc163fd 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -165,6 +165,7 @@ struct sparc_stackf {
#ifdef __KERNEL__
#include <linux/threads.h>
+#include <asm/switch_to.h>
static inline int pt_regs_trap_type(struct pt_regs *regs)
{
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 971fd435a281..48565c11e82a 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -6,6 +6,8 @@
#include <linux/jump_label.h>
#include <linux/memory.h>
+#include <asm/cacheflush.h>
+
#ifdef HAVE_JUMP_LABEL
void arch_jump_label_transform(struct jump_entry *entry,
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 768290a6c028..c8759550799f 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -7,6 +7,7 @@
#include <linux/kdebug.h>
#include <linux/ftrace.h>
+#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index bf95f55b82b0..4b4b28969a65 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -242,17 +242,6 @@ long compat_sys_fallocate(int fd, int mode,
long compat_sys_sched_rr_get_interval(compat_pid_t pid,
struct compat_timespec __user *interval);
-/* Versions of compat functions that differ from generic Linux. */
-struct compat_msgbuf;
-long tile_compat_sys_msgsnd(int msqid,
- struct compat_msgbuf __user *msgp,
- size_t msgsz, int msgflg);
-long tile_compat_sys_msgrcv(int msqid,
- struct compat_msgbuf __user *msgp,
- size_t msgsz, long msgtyp, int msgflg);
-long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid,
- compat_long_t addr, compat_long_t data);
-
/* Tilera Linux syscalls that don't have "compat" versions. */
#define compat_sys_flush_cache sys_flush_cache
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index bf5e9d70266c..d67459b9ac2a 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -16,7 +16,6 @@
#define __SYSCALL_COMPAT
#include <linux/compat.h>
-#include <linux/msg.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/fs.h>
@@ -95,52 +94,10 @@ long compat_sys_sched_rr_get_interval(compat_pid_t pid,
return ret;
}
-/*
- * The usual compat_sys_msgsnd() and _msgrcv() seem to be assuming
- * some different calling convention than our normal 32-bit tile code.
- */
-
-/* Already defined in ipc/compat.c, but we need it here. */
-struct compat_msgbuf {
- compat_long_t mtype;
- char mtext[1];
-};
-
-long tile_compat_sys_msgsnd(int msqid,
- struct compat_msgbuf __user *msgp,
- size_t msgsz, int msgflg)
-{
- compat_long_t mtype;
-
- if (get_user(mtype, &msgp->mtype))
- return -EFAULT;
- return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
-}
-
-long tile_compat_sys_msgrcv(int msqid,
- struct compat_msgbuf __user *msgp,
- size_t msgsz, long msgtyp, int msgflg)
-{
- long err, mtype;
-
- err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
- if (err < 0)
- goto out;
-
- if (put_user(mtype, &msgp->mtype))
- err = -EFAULT;
- out:
- return err;
-}
-
/* Provide the compat syscall number to call mapping. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
-/* The generic versions of these don't work for Tile. */
-#define compat_sys_msgrcv tile_compat_sys_msgrcv
-#define compat_sys_msgsnd tile_compat_sys_msgsnd
-
/* See comments in sys.c */
#define compat_sys_fadvise64_64 sys32_fadvise64_64
#define compat_sys_readahead sys32_readahead
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3ad653de7100..1d14cc6b79ad 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -69,7 +69,6 @@ config X86
select HAVE_ARCH_JUMP_LABEL
select HAVE_TEXT_POKE_SMP
select HAVE_GENERIC_HARDIRQS
- select HAVE_SPARSE_IRQ
select SPARSE_IRQ
select GENERIC_FIND_FIRST_BIT
select GENERIC_IRQ_PROBE
@@ -2164,9 +2163,9 @@ config IA32_EMULATION
depends on X86_64
select COMPAT_BINFMT_ELF
---help---
- Include code to run 32-bit programs under a 64-bit kernel. You should
- likely turn this on, unless you're 100% sure that you don't have any
- 32-bit programs left.
+ Include code to run legacy 32-bit programs under a
+ 64-bit kernel. You should likely turn this on, unless you're
+ 100% sure that you don't have any 32-bit programs left.
config IA32_AOUT
tristate "IA32 a.out support"
@@ -2174,9 +2173,23 @@ config IA32_AOUT
---help---
Support old a.out binaries in the 32bit emulation.
+config X86_X32
+ bool "x32 ABI for 64-bit mode (EXPERIMENTAL)"
+ depends on X86_64 && IA32_EMULATION && EXPERIMENTAL
+ ---help---
+ Include code to run binaries for the x32 native 32-bit ABI
+ for 64-bit processors. An x32 process gets access to the
+ full 64-bit register file and wide data path while leaving
+ pointers at 32 bits for smaller memory footprint.
+
+ You will need a recent binutils (2.22 or later) with
+ elf32_x86_64 support enabled to compile a kernel with this
+ option set.
+
config COMPAT
def_bool y
- depends on IA32_EMULATION
+ depends on IA32_EMULATION || X86_X32
+ select ARCH_WANT_OLD_COMPAT_IPC
config COMPAT_FOR_U64_ALIGNMENT
def_bool COMPAT
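
For reference, the data model described in the X86_X32 help text can be observed from user space once an x32 toolchain is available. A hypothetical check, assuming gcc -mx32 and a matching runtime are installed:

	/* x32-check.c: build with `gcc -mx32 x32-check.c`; under x32 both
	 * results print 4 even though the program runs in 64-bit mode with
	 * the full register file available. */
	#include <stdio.h>

	int main(void)
	{
		printf("sizeof(void *) = %zu\n", sizeof(void *));
		printf("sizeof(long)   = %zu\n", sizeof(long));
		return 0;
	}
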
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 209ba1294592..968dbe24a255 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -82,6 +82,22 @@ ifdef CONFIG_CC_STACKPROTECTOR
endif
endif
+ifdef CONFIG_X86_X32
+ x32_ld_ok := $(call try-run,\
+ /bin/echo -e '1: .quad 1b' | \
+ $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" - && \
+ $(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMPO" && \
+ $(LD) -m elf32_x86_64 "$$TMPO" -o "$$TMP",y,n)
+ ifeq ($(x32_ld_ok),y)
+ CONFIG_X86_X32_ABI := y
+ KBUILD_AFLAGS += -DCONFIG_X86_X32_ABI
+ KBUILD_CFLAGS += -DCONFIG_X86_X32_ABI
+ else
+ $(warning CONFIG_X86_X32 enabled but no binutils support)
+ endif
+endif
+export CONFIG_X86_X32_ABI
+
# Don't unroll struct assignments with kmemcheck enabled
ifeq ($(CONFIG_KMEMCHECK),y)
KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 2bf18059fbea..119db67dcb03 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -15,23 +15,28 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_SCHED=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
-CONFIG_USER_NS=y
-CONFIG_PID_NS=y
-CONFIG_NET_NS=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_EFI_PARTITION=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
-CONFIG_SPARSE_IRQ=y
CONFIG_X86_GENERIC=y
CONFIG_HPET_TIMER=y
CONFIG_SCHED_SMT=y
@@ -51,14 +56,12 @@ CONFIG_HZ_1000=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
# CONFIG_COMPAT_VDSO is not set
-CONFIG_PM=y
+CONFIG_HIBERNATION=y
CONFIG_PM_DEBUG=y
CONFIG_PM_TRACE_RTC=y
-CONFIG_HIBERNATION=y
CONFIG_ACPI_PROCFS=y
CONFIG_ACPI_DOCK=y
CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_DEBUG=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
@@ -69,7 +72,6 @@ CONFIG_PCI_MSI=y
CONFIG_PCCARD=y
CONFIG_YENTA=y
CONFIG_HOTPLUG_PCI=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -120,7 +122,6 @@ CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_LOG=y
CONFIG_IP_NF_TARGET_ULOG=y
CONFIG_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
@@ -128,7 +129,6 @@ CONFIG_IP_NF_MANGLE=y
CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_IPV6HEADER=y
-CONFIG_IP6_NF_TARGET_LOG=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
@@ -169,25 +169,20 @@ CONFIG_DM_ZERO=y
CONFIG_MACINTOSH_DRIVERS=y
CONFIG_MAC_EMUMOUSEBTN=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_VENDOR_3COM=y
+CONFIG_NETCONSOLE=y
+CONFIG_BNX2=y
+CONFIG_TIGON3=y
CONFIG_NET_TULIP=y
-CONFIG_NET_PCI=y
-CONFIG_FORCEDETH=y
CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_SKY2=y
CONFIG_NE2K_PCI=y
+CONFIG_FORCEDETH=y
CONFIG_8139TOO=y
# CONFIG_8139TOO_PIO is not set
-CONFIG_E1000=y
-CONFIG_E1000E=y
CONFIG_R8169=y
-CONFIG_SKY2=y
-CONFIG_TIGON3=y
-CONFIG_BNX2=y
-CONFIG_TR=y
-CONFIG_NET_PCMCIA=y
CONFIG_FDDI=y
-CONFIG_NETCONSOLE=y
CONFIG_INPUT_POLLDEV=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
@@ -196,6 +191,7 @@ CONFIG_INPUT_TABLET=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_INPUT_MISC=y
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
@@ -205,7 +201,6 @@ CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
CONFIG_HPET=y
@@ -220,7 +215,6 @@ CONFIG_DRM_I915=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
@@ -283,7 +277,6 @@ CONFIG_ZISOFS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=y
@@ -291,18 +284,6 @@ CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_SGI_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
@@ -317,13 +298,12 @@ CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_STACK_USAGE=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
CONFIG_EARLY_PRINTK_DBGP=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
# CONFIG_DEBUG_RODATA_TEST is not set
CONFIG_DEBUG_NX_TEST=m
CONFIG_DEBUG_BOOT_PARAMS=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 058a35b8286c..76eb2903809f 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,4 +1,3 @@
-CONFIG_64BIT=y
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
@@ -16,26 +15,29 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_SCHED=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
-CONFIG_USER_NS=y
-CONFIG_PID_NS=y
-CONFIG_NET_NS=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_EFI_PARTITION=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CALGARY_IOMMU=y
-CONFIG_AMD_IOMMU=y
-CONFIG_AMD_IOMMU_STATS=y
CONFIG_NR_CPUS=64
CONFIG_SCHED_SMT=y
CONFIG_PREEMPT_VOLUNTARY=y
@@ -53,27 +55,22 @@ CONFIG_HZ_1000=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
# CONFIG_COMPAT_VDSO is not set
-CONFIG_PM=y
+CONFIG_HIBERNATION=y
CONFIG_PM_DEBUG=y
CONFIG_PM_TRACE_RTC=y
-CONFIG_HIBERNATION=y
CONFIG_ACPI_PROCFS=y
CONFIG_ACPI_DOCK=y
CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_DEBUG=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_X86_ACPI_CPUFREQ=y
CONFIG_PCI_MMCONFIG=y
-CONFIG_INTEL_IOMMU=y
-# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
CONFIG_PCIEPORTBUS=y
CONFIG_PCCARD=y
CONFIG_YENTA=y
CONFIG_HOTPLUG_PCI=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_BINFMT_MISC=y
CONFIG_IA32_EMULATION=y
CONFIG_NET=y
@@ -125,7 +122,6 @@ CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_LOG=y
CONFIG_IP_NF_TARGET_ULOG=y
CONFIG_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
@@ -133,7 +129,6 @@ CONFIG_IP_NF_MANGLE=y
CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_IPV6HEADER=y
-CONFIG_IP6_NF_TARGET_LOG=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
@@ -172,20 +167,15 @@ CONFIG_DM_ZERO=y
CONFIG_MACINTOSH_DRIVERS=y
CONFIG_MAC_EMUMOUSEBTN=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_VENDOR_3COM=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
CONFIG_NET_TULIP=y
-CONFIG_NET_PCI=y
-CONFIG_FORCEDETH=y
CONFIG_E100=y
-CONFIG_8139TOO=y
CONFIG_E1000=y
CONFIG_SKY2=y
-CONFIG_TIGON3=y
-CONFIG_TR=y
-CONFIG_NET_PCMCIA=y
+CONFIG_FORCEDETH=y
+CONFIG_8139TOO=y
CONFIG_FDDI=y
-CONFIG_NETCONSOLE=y
CONFIG_INPUT_POLLDEV=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
@@ -194,6 +184,7 @@ CONFIG_INPUT_TABLET=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_INPUT_MISC=y
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
@@ -203,7 +194,6 @@ CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
# CONFIG_HW_RANDOM_INTEL is not set
# CONFIG_HW_RANDOM_AMD is not set
@@ -221,7 +211,6 @@ CONFIG_DRM_I915_KMS=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
@@ -268,6 +257,10 @@ CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
CONFIG_DMADEVICES=y
CONFIG_EEEPC_LAPTOP=y
+CONFIG_AMD_IOMMU=y
+CONFIG_AMD_IOMMU_STATS=y
+CONFIG_INTEL_IOMMU=y
+# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
CONFIG_EFI_VARS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -284,7 +277,6 @@ CONFIG_ZISOFS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=y
@@ -292,18 +284,6 @@ CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_SGI_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
@@ -317,13 +297,12 @@ CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_STACK_USAGE=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
CONFIG_EARLY_PRINTK_DBGP=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
# CONFIG_DEBUG_RODATA_TEST is not set
CONFIG_DEBUG_NX_TEST=m
CONFIG_DEBUG_BOOT_PARAMS=y
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 5563ba1cf513..a69245ba27e3 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -12,10 +12,8 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
-#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
-#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
@@ -32,20 +30,15 @@
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/sigframe.h>
+#include <asm/sighandling.h>
#include <asm/sys_ia32.h>
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
- X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
- X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
- X86_EFLAGS_CF)
-
-void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+#define FIX_EFLAGS __FIX_EFLAGS
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
int err = 0;
+ bool ia32 = is_ia32_task();
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
return -EFAULT;
@@ -75,8 +68,13 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
case __SI_FAULT >> 16:
break;
case __SI_CHLD >> 16:
- put_user_ex(from->si_utime, &to->si_utime);
- put_user_ex(from->si_stime, &to->si_stime);
+ if (ia32) {
+ put_user_ex(from->si_utime, &to->si_utime);
+ put_user_ex(from->si_stime, &to->si_stime);
+ } else {
+ put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime);
+ put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime);
+ }
put_user_ex(from->si_status, &to->si_status);
/* FALL THROUGH */
default:
@@ -348,7 +346,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
put_user_ex(regs->dx, &sc->dx);
put_user_ex(regs->cx, &sc->cx);
put_user_ex(regs->ax, &sc->ax);
- put_user_ex(current->thread.trap_no, &sc->trapno);
+ put_user_ex(current->thread.trap_nr, &sc->trapno);
put_user_ex(current->thread.error_code, &sc->err);
put_user_ex(regs->ip, &sc->ip);
put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index f6f5c53dc903..aec2202a596c 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -287,46 +287,6 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
return ret;
}
-asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
- compat_sigset_t __user *oset,
- unsigned int sigsetsize)
-{
- sigset_t s;
- compat_sigset_t s32;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- if (set) {
- if (copy_from_user(&s32, set, sizeof(compat_sigset_t)))
- return -EFAULT;
- switch (_NSIG_WORDS) {
- case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
- case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
- case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
- case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
- }
- }
- set_fs(KERNEL_DS);
- ret = sys_rt_sigprocmask(how,
- set ? (sigset_t __user *)&s : NULL,
- oset ? (sigset_t __user *)&s : NULL,
- sigsetsize);
- set_fs(old_fs);
- if (ret)
- return ret;
- if (oset) {
- switch (_NSIG_WORDS) {
- case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
- case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
- case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
- case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
- }
- if (copy_to_user(oset, &s32, sizeof(compat_sigset_t)))
- return -EFAULT;
- }
- return 0;
-}
-
asmlinkage long sys32_alarm(unsigned int seconds)
{
return alarm_setitimer(seconds);
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index b57e6a43a37a..f9c0d3ba9e84 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -14,6 +14,7 @@ header-y += msr.h
header-y += mtrr.h
header-y += posix_types_32.h
header-y += posix_types_64.h
+header-y += posix_types_x32.h
header-y += prctl.h
header-y += processor-flags.h
header-y += ptrace-abi.h
@@ -24,3 +25,4 @@ header-y += vsyscall.h
genhdr-y += unistd_32.h
genhdr-y += unistd_64.h
+genhdr-y += unistd_x32.h
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 4b2caeefe1a2..d85410171260 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -534,7 +534,7 @@ static inline unsigned int read_apic_id(void)
static inline int default_apic_id_valid(int apicid)
{
- return x2apic_mode || (apicid < 255);
+ return (apicid < 255);
}
extern void default_setup_apic_routing(void);
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 30d737ef2a42..d6805798d6fc 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -6,7 +6,9 @@
*/
#include <linux/types.h>
#include <linux/sched.h>
+#include <asm/processor.h>
#include <asm/user32.h>
+#include <asm/unistd.h>
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "i686\0\0"
@@ -186,7 +188,20 @@ struct compat_shmid64_ds {
/*
* The type of struct elf_prstatus.pr_reg in compatible core dumps.
*/
+#ifdef CONFIG_X86_X32_ABI
+typedef struct user_regs_struct compat_elf_gregset_t;
+
+#define PR_REG_SIZE(S) (test_thread_flag(TIF_IA32) ? 68 : 216)
+#define PRSTATUS_SIZE(S) (test_thread_flag(TIF_IA32) ? 144 : 296)
+#define SET_PR_FPVALID(S,V) \
+ do { *(int *) (((void *) &((S)->pr_reg)) + PR_REG_SIZE(0)) = (V); } \
+ while (0)
+
+#define COMPAT_USE_64BIT_TIME \
+ (!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
+#else
typedef struct user_regs_struct32 compat_elf_gregset_t;
+#endif
/*
* A pointer passed in from user mode. This should not
@@ -208,13 +223,30 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
static inline void __user *arch_compat_alloc_user_space(long len)
{
- struct pt_regs *regs = task_pt_regs(current);
- return (void __user *)regs->sp - len;
+ compat_uptr_t sp;
+
+ if (test_thread_flag(TIF_IA32)) {
+ sp = task_pt_regs(current)->sp;
+ } else {
+ /* -128 for the x32 ABI redzone */
+ sp = percpu_read(old_rsp) - 128;
+ }
+
+ return (void __user *)round_down(sp - len, 16);
+}
+
+static inline bool is_x32_task(void)
+{
+#ifdef CONFIG_X86_X32_ABI
+ if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)
+ return true;
+#endif
+ return false;
}
-static inline int is_compat_task(void)
+static inline bool is_compat_task(void)
{
- return current_thread_info()->status & TS_COMPAT;
+ return is_ia32_task() || is_x32_task();
}
#endif /* _ASM_X86_COMPAT_H */
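
A short sketch of how the three ABIs can be told apart with the helpers added above; the function and its messages are illustrative, not part of the patch:

	#include <linux/kernel.h>
	#include <asm/compat.h>         /* is_x32_task(), is_compat_task() */
	#include <asm/thread_info.h>    /* is_ia32_task() */

	static void report_task_abi(void)
	{
		if (is_x32_task())
			pr_debug("x32 task: 32-bit pointers, 64-bit registers and time_t\n");
		else if (is_ia32_task())
			pr_debug("ia32 task: legacy 32-bit compatibility mode\n");
		else
			pr_debug("native 64-bit task\n");
	}
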
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index f27f79abe021..5939f44fe0c0 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -155,7 +155,12 @@ do { \
#define elf_check_arch(x) \
((x)->e_machine == EM_X86_64)
-#define compat_elf_check_arch(x) elf_check_arch_ia32(x)
+#define compat_elf_check_arch(x) \
+ (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
+
+#if __USER32_DS != __USER_DS
+# error "The following code assumes __USER32_DS == __USER_DS"
+#endif
static inline void elf_common_init(struct thread_struct *t,
struct pt_regs *regs, const u16 ds)
@@ -178,8 +183,9 @@ static inline void elf_common_init(struct thread_struct *t,
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp);
#define compat_start_thread start_thread_ia32
-void set_personality_ia32(void);
-#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32()
+void set_personality_ia32(bool);
+#define COMPAT_SET_PERSONALITY(ex) \
+ set_personality_ia32((ex).e_machine == EM_X86_64)
#define COMPAT_ELF_PLATFORM ("i686")
@@ -286,7 +292,7 @@ do { \
#define VDSO_HIGH_BASE 0xffffe000U /* CONFIG_COMPAT_VDSO address */
/* 1GB for 64bit, 8MB for 32bit */
-#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
+#define STACK_RND_MASK (test_thread_flag(TIF_ADDR32) ? 0x7ff : 0x3fffff)
#define ARCH_DLINFO \
do { \
@@ -295,9 +301,20 @@ do { \
(unsigned long)current->mm->context.vdso); \
} while (0)
+#define ARCH_DLINFO_X32 \
+do { \
+ if (vdso_enabled) \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (unsigned long)current->mm->context.vdso); \
+} while (0)
+
#define AT_SYSINFO 32
-#define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(sysctl_vsyscall32)
+#define COMPAT_ARCH_DLINFO \
+if (test_thread_flag(TIF_X32)) \
+ ARCH_DLINFO_X32; \
+else \
+ ARCH_DLINFO_IA32(sysctl_vsyscall32)
#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
@@ -313,6 +330,8 @@ struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
+extern int x32_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
#define compat_arch_setup_additional_pages syscall32_setup_pages
@@ -329,7 +348,7 @@ static inline int mmap_is_ia32(void)
return 1;
#endif
#ifdef CONFIG_IA32_EMULATION
- if (test_thread_flag(TIF_IA32))
+ if (test_thread_flag(TIF_ADDR32))
return 1;
#endif
return 0;
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 1f7e62517284..ee52760549f0 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -43,6 +43,15 @@ struct ucontext_ia32 {
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
+struct ucontext_x32 {
+ unsigned int uc_flags;
+ unsigned int uc_link;
+ stack_ia32_t uc_stack;
+ unsigned int uc__pad0; /* needed for alignment */
+ struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
/* This matches struct stat64 in glibc2.2, hence the absolutely
* insane amounts of padding around dev_t's.
*/
@@ -116,6 +125,15 @@ typedef struct compat_siginfo {
compat_clock_t _stime;
} _sigchld;
+ /* SIGCHLD (x32 version) */
+ struct {
+ unsigned int _pid; /* which child */
+ unsigned int _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_s64 _utime;
+ compat_s64 _stime;
+ } _sigchld_x32;
+
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
struct {
unsigned int _addr; /* faulting insn/memory ref. */
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index f49253d75710..c5d1785373ed 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -14,6 +14,7 @@ void exit_idle(void);
#else /* !CONFIG_X86_64 */
static inline void enter_idle(void) { }
static inline void exit_idle(void) { }
+static inline void __exit_idle(void) { }
#endif /* CONFIG_X86_64 */
void amd_e400_remove_cpu(int cpu);
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 690d1cc9a877..2c4943de5150 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -21,6 +21,15 @@
#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15)
#define IO_APIC_REDIR_MASKED (1 << 16)
+struct io_apic_ops {
+ void (*init) (void);
+ unsigned int (*read) (unsigned int apic, unsigned int reg);
+ void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
+ void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
+};
+
+void __init set_io_apic_ops(const struct io_apic_ops *);
+
/*
* The structure of the IO-APIC:
*/
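
The io_apic_ops table above lets an alternative backend intercept IO-APIC register accesses. A hypothetical registration; every my_* name is made up for illustration, only struct io_apic_ops and set_io_apic_ops() come from this patch:

	static void my_ioapic_init(void) { }

	static unsigned int my_ioapic_read(unsigned int apic, unsigned int reg)
	{
		return 0;	/* placeholder */
	}

	static void my_ioapic_write(unsigned int apic, unsigned int reg, unsigned int value) { }
	static void my_ioapic_modify(unsigned int apic, unsigned int reg, unsigned int value) { }

	static const struct io_apic_ops my_io_apic_ops = {
		.init   = my_ioapic_init,
		.read   = my_ioapic_read,
		.write  = my_ioapic_write,
		.modify = my_ioapic_modify,
	};

	/* handed to set_io_apic_ops(&my_io_apic_ops) early in platform setup,
	 * before the IO-APICs are initialized */
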
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 4365ffdb461f..7e3f17f92c66 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -29,18 +29,18 @@
#define MTRR_IOCTL_BASE 'M'
-struct mtrr_sentry {
- unsigned long base; /* Base address */
- unsigned int size; /* Size of region */
- unsigned int type; /* Type of region */
-};
-
/* Warning: this structure has a different order from i386
on x86-64. The 32bit emulation code takes care of that.
But you need to use this for 64bit, otherwise your X server
will break. */
#ifdef __i386__
+struct mtrr_sentry {
+ unsigned long base; /* Base address */
+ unsigned int size; /* Size of region */
+ unsigned int type; /* Type of region */
+};
+
struct mtrr_gentry {
unsigned int regnum; /* Register number */
unsigned long base; /* Base address */
@@ -50,12 +50,20 @@ struct mtrr_gentry {
#else /* __i386__ */
+struct mtrr_sentry {
+ __u64 base; /* Base address */
+ __u32 size; /* Size of region */
+ __u32 type; /* Type of region */
+};
+
struct mtrr_gentry {
- unsigned long base; /* Base address */
- unsigned int size; /* Size of region */
- unsigned int regnum; /* Register number */
- unsigned int type; /* Type of region */
+ __u64 base; /* Base address */
+ __u32 size; /* Size of region */
+ __u32 regnum; /* Register number */
+ __u32 type; /* Type of region */
+ __u32 _pad; /* Unused */
};
+
#endif /* !__i386__ */
struct mtrr_var_range {
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index bb7133dc155d..3427b7798dbc 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -7,7 +7,9 @@
#else
# ifdef __i386__
# include "posix_types_32.h"
-# else
+# elif defined(__LP64__)
# include "posix_types_64.h"
+# else
+# include "posix_types_x32.h"
# endif
#endif
diff --git a/arch/x86/include/asm/posix_types_32.h b/arch/x86/include/asm/posix_types_32.h
index f7d9adf82e53..99f262e04b91 100644
--- a/arch/x86/include/asm/posix_types_32.h
+++ b/arch/x86/include/asm/posix_types_32.h
@@ -7,79 +7,22 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
typedef unsigned short __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
typedef unsigned short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef int __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
+#define __kernel_uid_t __kernel_uid_t
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__)
-
-#undef __FD_SET
-#define __FD_SET(fd,fdsetp) \
- asm volatile("btsl %1,%0": \
- "+m" (*(__kernel_fd_set *)(fdsetp)) \
- : "r" ((int)(fd)))
-
-#undef __FD_CLR
-#define __FD_CLR(fd,fdsetp) \
- asm volatile("btrl %1,%0": \
- "+m" (*(__kernel_fd_set *)(fdsetp)) \
- : "r" ((int) (fd)))
-
-#undef __FD_ISSET
-#define __FD_ISSET(fd,fdsetp) \
- (__extension__ \
- ({ \
- unsigned char __result; \
- asm volatile("btl %1,%2 ; setb %0" \
- : "=q" (__result) \
- : "r" ((int)(fd)), \
- "m" (*(__kernel_fd_set *)(fdsetp))); \
- __result; \
-}))
-
-#undef __FD_ZERO
-#define __FD_ZERO(fdsetp) \
-do { \
- int __d0, __d1; \
- asm volatile("cld ; rep ; stosl" \
- : "=m" (*(__kernel_fd_set *)(fdsetp)), \
- "=&c" (__d0), "=&D" (__d1) \
- : "a" (0), "1" (__FDSET_LONGS), \
- "2" ((__kernel_fd_set *)(fdsetp)) \
- : "memory"); \
-} while (0)
-
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
#endif /* _ASM_X86_POSIX_TYPES_32_H */
diff --git a/arch/x86/include/asm/posix_types_64.h b/arch/x86/include/asm/posix_types_64.h
index eb8d2d92b63e..cba0c1ead162 100644
--- a/arch/x86/include/asm/posix_types_64.h
+++ b/arch/x86/include/asm/posix_types_64.h
@@ -7,113 +7,13 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
-typedef unsigned long __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
-typedef int __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
typedef unsigned long __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
-#ifdef __KERNEL__
-
-#undef __FD_SET
-static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant cases (8 or 32 longs,
- * for 256 and 1024-bit fd_sets respectively)
- */
-#undef __FD_ZERO
-static inline void __FD_ZERO(__kernel_fd_set *p)
-{
- unsigned long *tmp = p->fds_bits;
- int i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 32:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
- tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
- tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
- tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
- return;
- case 16:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- return;
- case 8:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- return;
- case 4:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- return;
- }
- }
- i = __FDSET_LONGS;
- while (i) {
- i--;
- *tmp = 0;
- tmp++;
- }
-}
-
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
#endif /* _ASM_X86_POSIX_TYPES_64_H */
diff --git a/arch/x86/include/asm/posix_types_x32.h b/arch/x86/include/asm/posix_types_x32.h
new file mode 100644
index 000000000000..85f9bdafa93c
--- /dev/null
+++ b/arch/x86/include/asm/posix_types_x32.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_X86_POSIX_TYPES_X32_H
+#define _ASM_X86_POSIX_TYPES_X32_H
+
+/*
+ * This file is only used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ *
+ * These types should generally match the ones used by the 64-bit kernel.
+ *
+ */
+
+typedef long long __kernel_long_t;
+typedef unsigned long long __kernel_ulong_t;
+#define __kernel_long_t __kernel_long_t
+
+#include <asm/posix_types_64.h>
+
+#endif /* _ASM_X86_POSIX_TYPES_X32_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a19542c1685e..7284c9a6a0b5 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -463,7 +463,7 @@ struct thread_struct {
unsigned long ptrace_dr7;
/* Fault info: */
unsigned long cr2;
- unsigned long trap_no;
+ unsigned long trap_nr;
unsigned long error_code;
/* floating point and extended processor state */
struct fpu fpu;
@@ -873,9 +873,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
0xc0000000 : 0xFFFFe000)
-#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
+#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
+#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define STACK_TOP TASK_SIZE
@@ -897,6 +897,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
+
+/*
+ * User space RSP while inside the SYSCALL fast path
+ */
+DECLARE_PER_CPU(unsigned long, old_rsp);
+
#endif /* CONFIG_X86_64 */
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 35664547125b..dcfde52979c3 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -145,7 +145,6 @@ extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code, int si_code);
-void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 04459d25e66e..4a085383af27 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -230,34 +230,37 @@ struct sigcontext {
* User-space might still rely on the old definition:
*/
struct sigcontext {
- unsigned long r8;
- unsigned long r9;
- unsigned long r10;
- unsigned long r11;
- unsigned long r12;
- unsigned long r13;
- unsigned long r14;
- unsigned long r15;
- unsigned long rdi;
- unsigned long rsi;
- unsigned long rbp;
- unsigned long rbx;
- unsigned long rdx;
- unsigned long rax;
- unsigned long rcx;
- unsigned long rsp;
- unsigned long rip;
- unsigned long eflags; /* RFLAGS */
- unsigned short cs;
- unsigned short gs;
- unsigned short fs;
- unsigned short __pad0;
- unsigned long err;
- unsigned long trapno;
- unsigned long oldmask;
- unsigned long cr2;
+ __u64 r8;
+ __u64 r9;
+ __u64 r10;
+ __u64 r11;
+ __u64 r12;
+ __u64 r13;
+ __u64 r14;
+ __u64 r15;
+ __u64 rdi;
+ __u64 rsi;
+ __u64 rbp;
+ __u64 rbx;
+ __u64 rdx;
+ __u64 rax;
+ __u64 rcx;
+ __u64 rsp;
+ __u64 rip;
+ __u64 eflags; /* RFLAGS */
+ __u16 cs;
+ __u16 gs;
+ __u16 fs;
+ __u16 __pad0;
+ __u64 err;
+ __u64 trapno;
+ __u64 oldmask;
+ __u64 cr2;
struct _fpstate __user *fpstate; /* zero when no FPU context */
- unsigned long reserved1[8];
+#ifndef __LP64__
+ __u32 __fpstate_pad;
+#endif
+ __u64 reserved1[8];
};
#endif /* !__KERNEL__ */
diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h
index 4e0fe26d27d3..7c7c27c97daa 100644
--- a/arch/x86/include/asm/sigframe.h
+++ b/arch/x86/include/asm/sigframe.h
@@ -59,12 +59,25 @@ struct rt_sigframe_ia32 {
#endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */
#ifdef CONFIG_X86_64
+
struct rt_sigframe {
char __user *pretcode;
struct ucontext uc;
struct siginfo info;
/* fp state follows here */
};
+
+#ifdef CONFIG_X86_X32_ABI
+
+struct rt_sigframe_x32 {
+ u64 pretcode;
+ struct ucontext_x32 uc;
+ compat_siginfo_t info;
+ /* fp state follows here */
+};
+
+#endif /* CONFIG_X86_X32_ABI */
+
#endif /* CONFIG_X86_64 */
#endif /* _ASM_X86_SIGFRAME_H */
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
new file mode 100644
index 000000000000..ada93b3b8c66
--- /dev/null
+++ b/arch/x86/include/asm/sighandling.h
@@ -0,0 +1,24 @@
+#ifndef _ASM_X86_SIGHANDLING_H
+#define _ASM_X86_SIGHANDLING_H
+
+#include <linux/compiler.h>
+#include <linux/ptrace.h>
+#include <linux/signal.h>
+
+#include <asm/processor-flags.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
+ X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
+ X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
+ X86_EFLAGS_CF)
+
+void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ unsigned long *pax);
+int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
+ struct pt_regs *regs, unsigned long mask);
+
+#endif /* _ASM_X86_SIGHANDLING_H */
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index cb238526a9f1..3fda9db48819 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -10,6 +10,8 @@
#ifndef _ASM_X86_SYS_IA32_H
#define _ASM_X86_SYS_IA32_H
+#ifdef CONFIG_COMPAT
+
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/types.h>
@@ -36,8 +38,6 @@ asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *,
struct sigaction32 __user *, unsigned int);
asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
struct old_sigaction32 __user *);
-asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
- compat_sigset_t __user *, unsigned int);
asmlinkage long sys32_alarm(unsigned int);
asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
@@ -83,4 +83,7 @@ asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32);
asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
const char __user *);
+
+#endif /* CONFIG_COMPAT */
+
#endif /* _ASM_X86_SYS_IA32_H */
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index d962e5652a73..386b78686c4d 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -16,6 +16,7 @@
#include <linux/sched.h>
#include <linux/err.h>
#include <asm/asm-offsets.h> /* For NR_syscalls */
+#include <asm/unistd.h>
extern const unsigned long sys_call_table[];
@@ -26,13 +27,13 @@ extern const unsigned long sys_call_table[];
*/
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
- return regs->orig_ax;
+ return regs->orig_ax & __SYSCALL_MASK;
}
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
- regs->ax = regs->orig_ax;
+ regs->ax = regs->orig_ax & __SYSCALL_MASK;
}
static inline long syscall_get_error(struct task_struct *task,
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index cfd8144d5527..ad6df8ccd715 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -86,7 +86,7 @@ struct thread_info {
#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
-#define TIF_IA32 17 /* 32bit process */
+#define TIF_IA32 17 /* IA32 compatibility process */
#define TIF_FORK 18 /* ret_from_fork */
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
#define TIF_DEBUG 21 /* uses debug registers */
@@ -95,6 +95,8 @@ struct thread_info {
#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
+#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
+#define TIF_X32 30 /* 32-bit native x86-64 binary */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -116,6 +118,8 @@ struct thread_info {
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_ADDR32 (1 << TIF_ADDR32)
+#define _TIF_X32 (1 << TIF_X32)
/* work to do in syscall_trace_enter() */
#define _TIF_WORK_SYSCALL_ENTRY \
@@ -262,6 +266,18 @@ static inline void set_restore_sigmask(void)
ti->status |= TS_RESTORE_SIGMASK;
set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
}
+
+static inline bool is_ia32_task(void)
+{
+#ifdef CONFIG_X86_32
+ return true;
+#endif
+#ifdef CONFIG_IA32_EMULATION
+ if (current_thread_info()->status & TS_COMPAT)
+ return true;
+#endif
+ return false;
+}
#endif /* !__ASSEMBLY__ */
#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 0012d0902c5f..88eae2aec619 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -89,4 +89,29 @@ asmlinkage void smp_thermal_interrupt(void);
asmlinkage void mce_threshold_interrupt(void);
#endif
+/* Interrupts/Exceptions */
+enum {
+ X86_TRAP_DE = 0, /* 0, Divide-by-zero */
+ X86_TRAP_DB, /* 1, Debug */
+ X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
+ X86_TRAP_BP, /* 3, Breakpoint */
+ X86_TRAP_OF, /* 4, Overflow */
+ X86_TRAP_BR, /* 5, Bound Range Exceeded */
+ X86_TRAP_UD, /* 6, Invalid Opcode */
+ X86_TRAP_NM, /* 7, Device Not Available */
+ X86_TRAP_DF, /* 8, Double Fault */
+ X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
+ X86_TRAP_TS, /* 10, Invalid TSS */
+ X86_TRAP_NP, /* 11, Segment Not Present */
+ X86_TRAP_SS, /* 12, Stack Segment Fault */
+ X86_TRAP_GP, /* 13, General Protection Fault */
+ X86_TRAP_PF, /* 14, Page Fault */
+ X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
+ X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
+ X86_TRAP_AC, /* 17, Alignment Check */
+ X86_TRAP_MC, /* 18, Machine Check */
+ X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
+ X86_TRAP_IRET = 32, /* 32, IRET Exception */
+};
+
#endif /* _ASM_X86_TRAPS_H */
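
These symbolic names pair with the trap_no to trap_nr rename earlier in the series. An illustrative fault-handler fragment (not taken from this patch) showing the intended usage:

	#include <linux/sched.h>
	#include <asm/traps.h>

	static void record_page_fault(unsigned long error_code)
	{
		current->thread.trap_nr    = X86_TRAP_PF;	/* previously ->trap_no = 14 */
		current->thread.error_code = error_code;
	}
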
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 21f77b89e47a..37cdc9d99bb1 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -1,7 +1,17 @@
#ifndef _ASM_X86_UNISTD_H
#define _ASM_X86_UNISTD_H 1
+/* x32 syscall flag bit */
+#define __X32_SYSCALL_BIT 0x40000000
+
#ifdef __KERNEL__
+
+# ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_MASK (~(__X32_SYSCALL_BIT))
+# else
+# define __SYSCALL_MASK (~0)
+# endif
+
# ifdef CONFIG_X86_32
# include <asm/unistd_32.h>
@@ -14,6 +24,7 @@
# else
# include <asm/unistd_64.h>
+# include <asm/unistd_64_x32.h>
# define __ARCH_WANT_COMPAT_SYS_TIME
# endif
@@ -52,8 +63,10 @@
#else
# ifdef __i386__
# include <asm/unistd_32.h>
-# else
+# elif defined(__LP64__)
# include <asm/unistd_64.h>
+# else
+# include <asm/unistd_x32.h>
# endif
#endif
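Aside (illustrative example with made-up numbers): an x32 system call is a normal 64-bit syscall number with bit 30 (__X32_SYSCALL_BIT) set; the entry path masks that bit off with __SYSCALL_MASK before indexing the shared syscall table, as the entry_64.S hunks below show in assembly. The same arithmetic in plain C:

        #include <stdio.h>

        #define __X32_SYSCALL_BIT       0x40000000
        #define __SYSCALL_MASK          (~(__X32_SYSCALL_BIT))

        int main(void)
        {
                unsigned int nr = __X32_SYSCALL_BIT | 1;  /* hypothetical syscall 1, tagged x32 */

                printf("raw nr      = %#x\n", nr);
                printf("table index = %u\n", nr & __SYSCALL_MASK);
                printf("x32 flagged = %s\n",
                       (nr & __X32_SYSCALL_BIT) ? "yes" : "no");
                return 0;
        }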
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 815285bcaceb..8b38be2de9e1 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -5,13 +5,8 @@
#include <linux/clocksource.h>
struct vsyscall_gtod_data {
- seqlock_t lock;
+ seqcount_t seq;
- /* open coded 'struct timespec' */
- time_t wall_time_sec;
- u32 wall_time_nsec;
-
- struct timezone sys_tz;
struct { /* extract of a clocksource struct */
int vclock_mode;
cycle_t cycle_last;
@@ -19,8 +14,16 @@ struct vsyscall_gtod_data {
u32 mult;
u32 shift;
} clock;
- struct timespec wall_to_monotonic;
+
+ /* open coded 'struct timespec' */
+ time_t wall_time_sec;
+ u32 wall_time_nsec;
+ u32 monotonic_time_nsec;
+ time_t monotonic_time_sec;
+
+ struct timezone sys_tz;
struct timespec wall_time_coarse;
+ struct timespec monotonic_time_coarse;
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;
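Aside (userspace sketch, not kernel code): replacing the seqlock with a bare seqcount matches how the vDSO consumes this data: readers take no lock, copy the time fields, and retry if the writer bumped the sequence meanwhile. A minimal C11 analogue of that reader loop (payload races are tolerated and resolved by the retry, as in the kernel pattern):

        #include <stdatomic.h>
        #include <time.h>

        struct gtod {
                atomic_uint seq;        /* even = stable, odd = writer in progress */
                time_t wall_sec;
                long   wall_nsec;
        };

        static void gtod_read(const struct gtod *g, time_t *sec, long *nsec)
        {
                unsigned int start;

                do {
                        /* wait until no write is in progress */
                        while ((start = atomic_load_explicit(&g->seq,
                                                             memory_order_acquire)) & 1)
                                ;
                        *sec  = g->wall_sec;
                        *nsec = g->wall_nsec;
                        /* retry if the writer ran while we were copying */
                } while (atomic_load_explicit(&g->seq, memory_order_acquire) != start);
        }

        int main(void)
        {
                struct gtod g = { .wall_sec = 0, .wall_nsec = 0 };
                time_t s;
                long ns;

                atomic_init(&g.seq, 0);
                gtod_read(&g, &s, &ns);
                return 0;
        }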
diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h
index 6bf5b8e478c0..92e54abf89e0 100644
--- a/arch/x86/include/asm/x2apic.h
+++ b/arch/x86/include/asm/x2apic.h
@@ -18,6 +18,11 @@ static const struct cpumask *x2apic_target_cpus(void)
return cpu_online_mask;
}
+static int x2apic_apic_id_valid(int apicid)
+{
+ return 1;
+}
+
static int x2apic_apic_id_registered(void)
{
return 1;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 406ed77216d0..a415b1f44365 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -239,7 +239,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
* to not preallocating memory for all NR_CPUS
* when we use CPU hotplug.
*/
- if (!cpu_has_x2apic && (apic_id >= 0xff) && enabled)
+ if (!apic->apic_id_valid(apic_id) && enabled)
printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
else
acpi_register_lapic(apic_id, enabled);
@@ -642,6 +642,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
kfree(buffer.pointer);
buffer.length = ACPI_ALLOCATE_BUFFER;
buffer.pointer = NULL;
+ lapic = NULL;
if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
goto out;
@@ -650,7 +651,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
goto free_tmp_map;
cpumask_copy(tmp_map, cpu_present_mask);
- acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
+ acpi_register_lapic(physid, ACPI_MADT_ENABLED);
/*
* If mp_register_lapic successfully generates a new logical cpu
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2eec05b6d1b8..11544d8f1e97 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -383,20 +383,25 @@ static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
{
- unsigned int rsvd; /* 0: uninitialized */
+ unsigned int rsvd, vector;
if (offset >= APIC_EILVT_NR_MAX)
return ~0;
- rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED;
+ rsvd = atomic_read(&eilvt_offsets[offset]);
do {
- if (rsvd &&
- !eilvt_entry_is_changeable(rsvd, new))
+ vector = rsvd & ~APIC_EILVT_MASKED; /* 0: unassigned */
+ if (vector && !eilvt_entry_is_changeable(vector, new))
/* may not change if vectors are different */
return rsvd;
rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
} while (rsvd != new);
+ rsvd &= ~APIC_EILVT_MASKED;
+ if (rsvd && rsvd != vector)
+ pr_info("LVT offset %d assigned for vector 0x%02x\n",
+ offset, rsvd);
+
return new;
}
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index d9ea5f331ac5..899803e03214 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -229,11 +229,10 @@ static int __init numachip_system_init(void)
}
early_initcall(numachip_system_init);
-static int __cpuinit numachip_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int numachip_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (!strncmp(oem_id, "NUMASC", 6)) {
numachip_system = 1;
- setup_force_cpu_cap(X86_FEATURE_X2APIC);
return 1;
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6d10a66fc5a9..e88300d8e80a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -64,9 +64,28 @@
#include <asm/apic.h>
#define __apicdebuginit(type) static type __init
+
#define for_each_irq_pin(entry, head) \
for (entry = head; entry; entry = entry->next)
+static void __init __ioapic_init_mappings(void);
+
+static unsigned int __io_apic_read (unsigned int apic, unsigned int reg);
+static void __io_apic_write (unsigned int apic, unsigned int reg, unsigned int val);
+static void __io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
+
+static struct io_apic_ops io_apic_ops = {
+ .init = __ioapic_init_mappings,
+ .read = __io_apic_read,
+ .write = __io_apic_write,
+ .modify = __io_apic_modify,
+};
+
+void __init set_io_apic_ops(const struct io_apic_ops *ops)
+{
+ io_apic_ops = *ops;
+}
+
/*
* Is the SiS APIC rmw bug present ?
* -1 = don't know, 0 = no, 1 = yes
@@ -294,6 +313,22 @@ static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
irq_free_desc(at);
}
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+ return io_apic_ops.read(apic, reg);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+ io_apic_ops.write(apic, reg, value);
+}
+
+static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+{
+ io_apic_ops.modify(apic, reg, value);
+}
+
+
struct io_apic {
unsigned int index;
unsigned int unused[3];
@@ -314,16 +349,17 @@ static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
writel(vector, &io_apic->eoi);
}
-static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+static unsigned int __io_apic_read(unsigned int apic, unsigned int reg)
{
struct io_apic __iomem *io_apic = io_apic_base(apic);
writel(reg, &io_apic->index);
return readl(&io_apic->data);
}
-static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+static void __io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
struct io_apic __iomem *io_apic = io_apic_base(apic);
+
writel(reg, &io_apic->index);
writel(value, &io_apic->data);
}
@@ -334,7 +370,7 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned i
*
* Older SiS APIC requires we rewrite the index register
*/
-static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+static void __io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
struct io_apic __iomem *io_apic = io_apic_base(apic);
@@ -377,6 +413,7 @@ static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
+
return eu.entry;
}
@@ -384,9 +421,11 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
union entry_union eu;
unsigned long flags;
+
raw_spin_lock_irqsave(&ioapic_lock, flags);
eu.entry = __ioapic_read_entry(apic, pin);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+
return eu.entry;
}
@@ -396,8 +435,7 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
* the interrupt, and we need to make sure the entry is fully populated
* before that happens.
*/
-static void
-__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
union entry_union eu = {{0, 0}};
@@ -409,6 +447,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
unsigned long flags;
+
raw_spin_lock_irqsave(&ioapic_lock, flags);
__ioapic_write_entry(apic, pin, e);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -435,8 +474,7 @@ static void ioapic_mask_entry(int apic, int pin)
* shared ISA-space IRQs, so we have to support them. We are super
* fast in the common case, and fast for shared ISA-space IRQs.
*/
-static int
-__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
+static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
struct irq_pin_list **last, *entry;
@@ -521,6 +559,7 @@ static void io_apic_sync(struct irq_pin_list *entry)
* a dummy read from the IO-APIC
*/
struct io_apic __iomem *io_apic;
+
io_apic = io_apic_base(entry->apic);
readl(&io_apic->data);
}
@@ -2512,21 +2551,73 @@ static void ack_apic_edge(struct irq_data *data)
atomic_t irq_mis_count;
-static void ack_apic_level(struct irq_data *data)
-{
- struct irq_cfg *cfg = data->chip_data;
- int i, do_unmask_irq = 0, irq = data->irq;
- unsigned long v;
-
- irq_complete_move(cfg);
#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
+{
/* If we are moving the irq we need to mask it */
if (unlikely(irqd_is_setaffinity_pending(data))) {
- do_unmask_irq = 1;
mask_ioapic(cfg);
+ return true;
}
+ return false;
+}
+
+static inline void ioapic_irqd_unmask(struct irq_data *data,
+ struct irq_cfg *cfg, bool masked)
+{
+ if (unlikely(masked)) {
+ /* Only migrate the irq if the ack has been received.
+ *
+ * On rare occasions the broadcast level triggered ack gets
+ * delayed going to ioapics, and if we reprogram the
+ * vector while Remote IRR is still set the irq will never
+ * fire again.
+ *
+ * To prevent this scenario we read the Remote IRR bit
+ * of the ioapic. This has two effects.
+ * - On any sane system the read of the ioapic will
+ * flush writes (and acks) going to the ioapic from
+ * this cpu.
+ * - We get to see if the ACK has actually been delivered.
+ *
+ * Based on failed experiments of reprogramming the
+ * ioapic entry from outside of irq context starting
+ * with masking the ioapic entry and then polling until
+ * Remote IRR was clear before reprogramming the
+ * ioapic I don't trust the Remote IRR bit to be
+ * completely accurate.
+ *
+ * However there appears to be no other way to plug
+ * this race, so if the Remote IRR bit is not
+ * accurate and is causing problems then it is a hardware bug
+ * and you can go talk to the chipset vendor about it.
+ */
+ if (!io_apic_level_ack_pending(cfg))
+ irq_move_masked_irq(data);
+ unmask_ioapic(cfg);
+ }
+}
+#else
+static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
+{
+ return false;
+}
+static inline void ioapic_irqd_unmask(struct irq_data *data,
+ struct irq_cfg *cfg, bool masked)
+{
+}
#endif
+static void ack_apic_level(struct irq_data *data)
+{
+ struct irq_cfg *cfg = data->chip_data;
+ int i, irq = data->irq;
+ unsigned long v;
+ bool masked;
+
+ irq_complete_move(cfg);
+ masked = ioapic_irqd_mask(data, cfg);
+
/*
* It appears there is an erratum which affects at least version 0x11
* of I/O APIC (that's the 82093AA and cores integrated into various
@@ -2581,38 +2672,7 @@ static void ack_apic_level(struct irq_data *data)
eoi_ioapic_irq(irq, cfg);
}
- /* Now we can move and renable the irq */
- if (unlikely(do_unmask_irq)) {
- /* Only migrate the irq if the ack has been received.
- *
- * On rare occasions the broadcast level triggered ack gets
- * delayed going to ioapics, and if we reprogram the
- * vector while Remote IRR is still set the irq will never
- * fire again.
- *
- * To prevent this scenario we read the Remote IRR bit
- * of the ioapic. This has two effects.
- * - On any sane system the read of the ioapic will
- * flush writes (and acks) going to the ioapic from
- * this cpu.
- * - We get to see if the ACK has actually been delivered.
- *
- * Based on failed experiments of reprogramming the
- * ioapic entry from outside of irq context starting
- * with masking the ioapic entry and then polling until
- * Remote IRR was clear before reprogramming the
- * ioapic I don't trust the Remote IRR bit to be
- * completey accurate.
- *
- * However there appears to be no other way to plug
- * this race, so if the Remote IRR bit is not
- * accurate and is causing problems then it is a hardware bug
- * and you can go talk to the chipset vendor about it.
- */
- if (!io_apic_level_ack_pending(cfg))
- irq_move_masked_irq(data);
- unmask_ioapic(cfg);
- }
+ ioapic_irqd_unmask(data, cfg, masked);
}
#ifdef CONFIG_IRQ_REMAP
@@ -3873,6 +3933,11 @@ static struct resource * __init ioapic_setup_resources(int nr_ioapics)
void __init ioapic_and_gsi_init(void)
{
+ io_apic_ops.init();
+}
+
+static void __init __ioapic_init_mappings(void)
+{
unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
struct resource *ioapic_res;
int i;
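Aside (miniature example, not the kernel code): struct io_apic_ops is a standard ops-table indirection: native accessors are installed by default, and set_io_apic_ops() lets a paravirtualized backend swap in its own read/write/modify before the IO-APIC is mapped. The same shape, self-contained:

        #include <stdio.h>

        struct io_ops {
                unsigned int (*read)(unsigned int reg);
                void (*write)(unsigned int reg, unsigned int val);
        };

        static unsigned int native_read(unsigned int reg)
        {
                printf("native read  reg %#x\n", reg);
                return 0;
        }

        static void native_write(unsigned int reg, unsigned int val)
        {
                printf("native write reg %#x = %u\n", reg, val);
        }

        static struct io_ops ops = { .read = native_read, .write = native_write };

        /* override hook, analogous to set_io_apic_ops() */
        void set_io_ops(const struct io_ops *new_ops)
        {
                ops = *new_ops;
        }

        int main(void)
        {
                ops.write(0x10, 42);    /* all callers go through the table */
                (void)ops.read(0x10);
                return 0;
        }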
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 9193713060a9..48f3103b3c93 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -213,7 +213,7 @@ static struct apic apic_x2apic_cluster = {
.name = "cluster x2apic",
.probe = x2apic_cluster_probe,
.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
- .apic_id_valid = default_apic_id_valid,
+ .apic_id_valid = x2apic_apic_id_valid,
.apic_id_registered = x2apic_apic_id_registered,
.irq_delivery_mode = dest_LowestPrio,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index bcd1db6eaca9..8a778db45e3a 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -119,7 +119,7 @@ static struct apic apic_x2apic_phys = {
.name = "physical x2apic",
.probe = x2apic_phys_probe,
.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
- .apic_id_valid = default_apic_id_valid,
+ .apic_id_valid = x2apic_apic_id_valid,
.apic_id_registered = x2apic_apic_id_registered,
.irq_delivery_mode = dest_Fixed,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index fc4771425852..87bfa69e216e 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -266,6 +266,11 @@ static void uv_send_IPI_all(int vector)
uv_send_IPI_mask(cpu_online_mask, vector);
}
+static int uv_apic_id_valid(int apicid)
+{
+ return 1;
+}
+
static int uv_apic_id_registered(void)
{
return 1;
@@ -351,7 +356,7 @@ static struct apic __refdata apic_x2apic_uv_x = {
.name = "UV large system",
.probe = uv_probe,
.acpi_madt_oem_check = uv_acpi_madt_oem_check,
- .apic_id_valid = default_apic_id_valid,
+ .apic_id_valid = uv_apic_id_valid,
.apic_id_registered = uv_apic_id_registered,
.irq_delivery_mode = dest_Fixed,
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 834e897b1e25..1b4754f82ba7 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -1,6 +1,12 @@
#include <asm/ia32.h>
#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
+#ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_X32(nr, sym, compat) [nr] = 1,
+#else
+# define __SYSCALL_X32(nr, sym, compat) /* nothing */
+#endif
static char syscalls_64[] = {
#include <asm/syscalls_64.h>
};
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e49477444fff..67e258362a3d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -999,7 +999,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
else
printk(KERN_CONT "\n");
- __print_cpu_msr();
+ print_cpu_msr(c);
}
void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 79289632cb27..a041e094b8b9 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -167,6 +167,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
{
int err = 0;
mtrr_type type;
+ unsigned long base;
unsigned long size;
struct mtrr_sentry sentry;
struct mtrr_gentry gentry;
@@ -267,14 +268,14 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
#endif
if (gentry.regnum >= num_var_ranges)
return -EINVAL;
- mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+ mtrr_if->get(gentry.regnum, &base, &size, &type);
/* Hide entries that go above 4GB */
- if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
+ if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
|| size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
gentry.base = gentry.size = gentry.type = 0;
else {
- gentry.base <<= PAGE_SHIFT;
+ gentry.base = base << PAGE_SHIFT;
gentry.size = size << PAGE_SHIFT;
gentry.type = type;
}
@@ -321,11 +322,12 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
#endif
if (gentry.regnum >= num_var_ranges)
return -EINVAL;
- mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+ mtrr_if->get(gentry.regnum, &base, &size, &type);
/* Hide entries that would overflow */
if (size != (__typeof__(gentry.size))size)
gentry.base = gentry.size = gentry.type = 0;
else {
+ gentry.base = base;
gentry.size = size;
gentry.type = type;
}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index fa2900c0e398..40883ffe2da9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -29,7 +29,6 @@
#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
-#include <asm/compat.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/timer.h>
@@ -1748,6 +1747,9 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
}
#ifdef CONFIG_COMPAT
+
+#include <asm/compat.h>
+
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 4025fe4f928f..1b81839b6c88 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -37,13 +37,16 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
struct thread_info *tinfo, int *graph)
{
- struct task_struct *task = tinfo->task;
+ struct task_struct *task;
unsigned long ret_addr;
- int index = task->curr_ret_stack;
+ int index;
if (addr != (unsigned long)return_to_handler)
return;
+ task = tinfo->task;
+ index = task->curr_ret_stack;
+
if (!task->ret_stack || index < *graph)
return;
@@ -265,7 +268,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
#endif
printk("\n");
if (notify_die(DIE_OOPS, str, regs, err,
- current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
+ current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
return 1;
show_registers(regs);
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 734ebd1d3caa..cdc79b5cfcd9 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -481,7 +481,12 @@ GLOBAL(system_call_after_swapgs)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz tracesys
system_call_fastpath:
+#if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max,%rax
+#else
+ andl $__SYSCALL_MASK,%eax
+ cmpl $__NR_syscall_max,%eax
+#endif
ja badsys
movq %r10,%rcx
call *sys_call_table(,%rax,8) # XXX: rip relative
@@ -595,7 +600,12 @@ tracesys:
*/
LOAD_ARGS ARGOFFSET, 1
RESTORE_REST
+#if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max,%rax
+#else
+ andl $__SYSCALL_MASK,%eax
+ cmpl $__NR_syscall_max,%eax
+#endif
ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
movq %r10,%rcx /* fixup for C */
call *sys_call_table(,%rax,8)
@@ -735,6 +745,40 @@ ENTRY(stub_rt_sigreturn)
CFI_ENDPROC
END(stub_rt_sigreturn)
+#ifdef CONFIG_X86_X32_ABI
+ PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
+
+ENTRY(stub_x32_rt_sigreturn)
+ CFI_STARTPROC
+ addq $8, %rsp
+ PARTIAL_FRAME 0
+ SAVE_REST
+ movq %rsp,%rdi
+ FIXUP_TOP_OF_STACK %r11
+ call sys32_x32_rt_sigreturn
+ movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+END(stub_x32_rt_sigreturn)
+
+ENTRY(stub_x32_execve)
+ CFI_STARTPROC
+ addq $8, %rsp
+ PARTIAL_FRAME 0
+ SAVE_REST
+ FIXUP_TOP_OF_STACK %r11
+ movq %rsp, %rcx
+ call sys32_execve
+ RESTORE_TOP_OF_STACK %r11
+ movq %rax,RAX(%rsp)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+END(stub_x32_execve)
+
+#endif
+
/*
* Build the entry stubs and pointer table with some assembler magic.
* We pack 7 stubs into a single 32-byte chunk, which will fit in a
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 6d5fc8cfd5d6..252981afd6c4 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -60,7 +60,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
outb(0, 0xF0);
if (ignore_fpu_irq || !boot_cpu_data.hard_math)
return IRQ_NONE;
- math_error(get_irq_regs(), 0, 16);
+ math_error(get_irq_regs(), 0, X86_TRAP_MF);
return IRQ_HANDLED;
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9b24f36eb55f..a33afaa5ddb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -12,6 +12,9 @@
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
+#include <linux/stackprotector.h>
+#include <linux/tick.h>
+#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
@@ -22,6 +25,24 @@
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
+#include <asm/nmi.h>
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU(unsigned char, is_idle);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+ atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+ atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+#endif
struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);
@@ -370,6 +391,99 @@ static inline int hlt_use_halt(void)
}
#endif
+#ifndef CONFIG_SMP
+static inline void play_dead(void)
+{
+ BUG();
+}
+#endif
+
+#ifdef CONFIG_X86_64
+void enter_idle(void)
+{
+ percpu_write(is_idle, 1);
+ atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+}
+
+static void __exit_idle(void)
+{
+ if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
+ return;
+ atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+ /* idle loop has pid 0 */
+ if (current->pid)
+ return;
+ __exit_idle();
+}
+#endif
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+ /*
+ * If we're the non-boot CPU, nothing set the stack canary up
+ * for us. CPU0 already has it initialized but no harm in
+ * doing it again. This is a good place for updating it, as
+ * we won't ever return from this function (so the invalid
+ * canaries already on the stack won't ever trigger).
+ */
+ boot_init_stack_canary();
+ current_thread_info()->status |= TS_POLLING;
+
+ while (1) {
+ tick_nohz_idle_enter();
+
+ while (!need_resched()) {
+ rmb();
+
+ if (cpu_is_offline(smp_processor_id()))
+ play_dead();
+
+ /*
+ * Idle routines should keep interrupts disabled
+ * from here on, until they go to idle.
+ * Otherwise, idle callbacks can misfire.
+ */
+ local_touch_nmi();
+ local_irq_disable();
+
+ enter_idle();
+
+ /* Don't trace irqs off for idle */
+ stop_critical_timings();
+
+ /* enter_idle() needs rcu for notifiers */
+ rcu_idle_enter();
+
+ if (cpuidle_idle_call())
+ pm_idle();
+
+ rcu_idle_exit();
+ start_critical_timings();
+
+ /* In many cases the interrupt that ended idle
+ has already called exit_idle. But some idle
+ loops can be woken up without interrupt. */
+ __exit_idle();
+ }
+
+ tick_nohz_idle_exit();
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
+}
+
/*
* We use this if we don't have any better
* idle routine..
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index aae4f4bbbe88..ae6847303e26 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -9,7 +9,6 @@
* This file handles the architecture-dependent parts of process handling..
*/
-#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
@@ -31,14 +30,12 @@
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
-#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
-#include <linux/cpuidle.h>
#include <asm/pgtable.h>
#include <asm/ldt.h>
@@ -57,7 +54,6 @@
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
-#include <asm/nmi.h>
#include <asm/switch_to.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -70,60 +66,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return ((unsigned long *)tsk->thread.sp)[3];
}
-#ifndef CONFIG_SMP
-static inline void play_dead(void)
-{
- BUG();
-}
-#endif
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
- int cpu = smp_processor_id();
-
- /*
- * If we're the non-boot CPU, nothing set the stack canary up
- * for us. CPU0 already has it initialized but no harm in
- * doing it again. This is a good place for updating it, as
- * we wont ever return from this function (so the invalid
- * canaries already on the stack wont ever trigger).
- */
- boot_init_stack_canary();
-
- current_thread_info()->status |= TS_POLLING;
-
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (!need_resched()) {
-
- check_pgt_cache();
- rmb();
-
- if (cpu_is_offline(cpu))
- play_dead();
-
- local_touch_nmi();
- local_irq_disable();
- /* Don't trace irqs off for idle */
- stop_critical_timings();
- if (cpuidle_idle_call())
- pm_idle();
- start_critical_timings();
- }
- rcu_idle_exit();
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
-}
-
void __show_regs(struct pt_regs *regs, int all)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 61270e8d428a..733ca39f367e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -14,7 +14,6 @@
* This file handles the architecture-dependent parts of process handling..
*/
-#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
@@ -32,12 +31,10 @@
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
-#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
-#include <linux/cpuidle.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -51,115 +48,11 @@
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
-#include <asm/nmi.h>
#include <asm/switch_to.h>
asmlinkage extern void ret_from_fork(void);
DEFINE_PER_CPU(unsigned long, old_rsp);
-static DEFINE_PER_CPU(unsigned char, is_idle);
-
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
- atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
-
-void enter_idle(void)
-{
- percpu_write(is_idle, 1);
- atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-}
-
-static void __exit_idle(void)
-{
- if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
- return;
- atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-}
-
-/* Called from interrupts to signify idle end */
-void exit_idle(void)
-{
- /* idle loop has pid 0 */
- if (current->pid)
- return;
- __exit_idle();
-}
-
-#ifndef CONFIG_SMP
-static inline void play_dead(void)
-{
- BUG();
-}
-#endif
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
- current_thread_info()->status |= TS_POLLING;
-
- /*
- * If we're the non-boot CPU, nothing set the stack canary up
- * for us. CPU0 already has it initialized but no harm in
- * doing it again. This is a good place for updating it, as
- * we wont ever return from this function (so the invalid
- * canaries already on the stack wont ever trigger).
- */
- boot_init_stack_canary();
-
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- while (!need_resched()) {
-
- rmb();
-
- if (cpu_is_offline(smp_processor_id()))
- play_dead();
- /*
- * Idle routines should keep interrupts disabled
- * from here on, until they go to idle.
- * Otherwise, idle callbacks can misfire.
- */
- local_touch_nmi();
- local_irq_disable();
- enter_idle();
- /* Don't trace irqs off for idle */
- stop_critical_timings();
-
- /* enter_idle() needs rcu for notifiers */
- rcu_idle_enter();
-
- if (cpuidle_idle_call())
- pm_idle();
-
- rcu_idle_exit();
- start_critical_timings();
-
- /* In many cases the interrupt that ended idle
- has already called exit_idle. But some idle
- loops can be woken up without interrupt. */
- __exit_idle();
- }
-
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
-}
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
@@ -365,7 +258,9 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
start_thread_common(regs, new_ip, new_sp,
- __USER32_CS, __USER32_DS, __USER32_DS);
+ test_thread_flag(TIF_X32)
+ ? __USER_CS : __USER32_CS,
+ __USER_DS, __USER_DS);
}
#endif
@@ -488,6 +383,8 @@ void set_personality_64bit(void)
/* Make sure to be in 64bit mode */
clear_thread_flag(TIF_IA32);
+ clear_thread_flag(TIF_ADDR32);
+ clear_thread_flag(TIF_X32);
/* Ensure the corresponding mm is not marked. */
if (current->mm)
@@ -500,20 +397,31 @@ void set_personality_64bit(void)
current->personality &= ~READ_IMPLIES_EXEC;
}
-void set_personality_ia32(void)
+void set_personality_ia32(bool x32)
{
/* inherit personality from parent */
/* Make sure to be in 32bit mode */
- set_thread_flag(TIF_IA32);
- current->personality |= force_personality32;
+ set_thread_flag(TIF_ADDR32);
/* Mark the associated mm as containing 32-bit tasks. */
if (current->mm)
current->mm->context.ia32_compat = 1;
- /* Prepare the first "return" to user space */
- current_thread_info()->status |= TS_COMPAT;
+ if (x32) {
+ clear_thread_flag(TIF_IA32);
+ set_thread_flag(TIF_X32);
+ current->personality &= ~READ_IMPLIES_EXEC;
+ /* is_compat_task() uses the presence of the x32
+ syscall bit flag to determine compat status */
+ current_thread_info()->status &= ~TS_COMPAT;
+ } else {
+ set_thread_flag(TIF_IA32);
+ clear_thread_flag(TIF_X32);
+ current->personality |= force_personality32;
+ /* Prepare the first "return" to user space */
+ current_thread_info()->status |= TS_COMPAT;
+ }
}
unsigned long get_wchan(struct task_struct *p)
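Aside (simplified sketch with a hypothetical helper): after this hunk the three user ABIs are distinguished by which thread flags the set_personality_*() paths leave set; roughly the mapping below (TS_COMPAT handling omitted):

        #include <stdio.h>

        enum abi { ABI_NATIVE_64, ABI_IA32, ABI_X32 };

        struct tif { int ia32, x32, addr32; };

        static struct tif personality_flags(enum abi abi)
        {
                switch (abi) {
                case ABI_IA32: return (struct tif){ .ia32 = 1, .x32 = 0, .addr32 = 1 };
                case ABI_X32:  return (struct tif){ .ia32 = 0, .x32 = 1, .addr32 = 1 };
                default:       return (struct tif){ 0 };  /* native 64-bit: all clear */
                }
        }

        int main(void)
        {
                struct tif t = personality_flags(ABI_X32);

                printf("x32 task: TIF_IA32=%d TIF_X32=%d TIF_ADDR32=%d\n",
                       t.ia32, t.x32, t.addr32);
                return 0;
        }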
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 8a634c887652..685845cf16e0 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -33,6 +33,7 @@
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/hw_breakpoint.h>
+#include <asm/traps.h>
#include "tls.h"
@@ -1130,6 +1131,100 @@ static int genregs32_set(struct task_struct *target,
return ret;
}
+#ifdef CONFIG_X86_X32_ABI
+static long x32_arch_ptrace(struct task_struct *child,
+ compat_long_t request, compat_ulong_t caddr,
+ compat_ulong_t cdata)
+{
+ unsigned long addr = caddr;
+ unsigned long data = cdata;
+ void __user *datap = compat_ptr(data);
+ int ret;
+
+ switch (request) {
+ /* Read 32bits at location addr in the USER area. Only allow
+ to return the lower 32bits of segment and debug registers. */
+ case PTRACE_PEEKUSR: {
+ u32 tmp;
+
+ ret = -EIO;
+ if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
+ addr < offsetof(struct user_regs_struct, cs))
+ break;
+
+ tmp = 0; /* Default return condition */
+ if (addr < sizeof(struct user_regs_struct))
+ tmp = getreg(child, addr);
+ else if (addr >= offsetof(struct user, u_debugreg[0]) &&
+ addr <= offsetof(struct user, u_debugreg[7])) {
+ addr -= offsetof(struct user, u_debugreg[0]);
+ tmp = ptrace_get_debugreg(child, addr / sizeof(data));
+ }
+ ret = put_user(tmp, (__u32 __user *)datap);
+ break;
+ }
+
+ /* Write the word at location addr in the USER area. Only allow
+ to update segment and debug registers with the upper 32bits
+ zero-extended. */
+ case PTRACE_POKEUSR:
+ ret = -EIO;
+ if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
+ addr < offsetof(struct user_regs_struct, cs))
+ break;
+
+ if (addr < sizeof(struct user_regs_struct))
+ ret = putreg(child, addr, data);
+ else if (addr >= offsetof(struct user, u_debugreg[0]) &&
+ addr <= offsetof(struct user, u_debugreg[7])) {
+ addr -= offsetof(struct user, u_debugreg[0]);
+ ret = ptrace_set_debugreg(child,
+ addr / sizeof(data), data);
+ }
+ break;
+
+ case PTRACE_GETREGS: /* Get all gp regs from the child. */
+ return copy_regset_to_user(child,
+ task_user_regset_view(current),
+ REGSET_GENERAL,
+ 0, sizeof(struct user_regs_struct),
+ datap);
+
+ case PTRACE_SETREGS: /* Set all gp regs in the child. */
+ return copy_regset_from_user(child,
+ task_user_regset_view(current),
+ REGSET_GENERAL,
+ 0, sizeof(struct user_regs_struct),
+ datap);
+
+ case PTRACE_GETFPREGS: /* Get the child FPU state. */
+ return copy_regset_to_user(child,
+ task_user_regset_view(current),
+ REGSET_FP,
+ 0, sizeof(struct user_i387_struct),
+ datap);
+
+ case PTRACE_SETFPREGS: /* Set the child FPU state. */
+ return copy_regset_from_user(child,
+ task_user_regset_view(current),
+ REGSET_FP,
+ 0, sizeof(struct user_i387_struct),
+ datap);
+
+ /* normal 64bit interface to access TLS data.
+ Works just like arch_prctl, except that the arguments
+ are reversed. */
+ case PTRACE_ARCH_PRCTL:
+ return do_arch_prctl(child, data, addr);
+
+ default:
+ return compat_ptrace_request(child, request, addr, data);
+ }
+
+ return ret;
+}
+#endif
+
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
@@ -1139,6 +1234,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
int ret;
__u32 val;
+#ifdef CONFIG_X86_X32_ABI
+ if (!is_ia32_task())
+ return x32_arch_ptrace(child, request, caddr, cdata);
+#endif
+
switch (request) {
case PTRACE_PEEKUSR:
ret = getreg32(child, addr, &val);
@@ -1326,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
int error_code, int si_code,
struct siginfo *info)
{
- tsk->thread.trap_no = 1;
+ tsk->thread.trap_nr = X86_TRAP_DB;
tsk->thread.error_code = error_code;
memset(info, 0, sizeof(*info));
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 25edcfc9ba5b..115eac431483 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -10,10 +10,8 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
-#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
-#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
@@ -27,10 +25,12 @@
#include <asm/fpu-internal.h>
#include <asm/vdso.h>
#include <asm/mce.h>
+#include <asm/sighandling.h>
#ifdef CONFIG_X86_64
#include <asm/proto.h>
#include <asm/ia32_unistd.h>
+#include <asm/sys_ia32.h>
#endif /* CONFIG_X86_64 */
#include <asm/syscall.h>
@@ -38,13 +38,6 @@
#include <asm/sigframe.h>
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
- X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
- X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
- X86_EFLAGS_CF)
-
#ifdef CONFIG_X86_32
# define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF)
#else
@@ -69,9 +62,8 @@
regs->seg = GET_SEG(seg) | 3; \
} while (0)
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
- unsigned long *pax)
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ unsigned long *pax)
{
void __user *buf;
unsigned int tmpflags;
@@ -126,9 +118,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
return err;
}
-static int
-setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
- struct pt_regs *regs, unsigned long mask)
+int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
+ struct pt_regs *regs, unsigned long mask)
{
int err = 0;
@@ -160,7 +151,7 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
put_user_ex(regs->r15, &sc->r15);
#endif /* CONFIG_X86_64 */
- put_user_ex(current->thread.trap_no, &sc->trapno);
+ put_user_ex(current->thread.trap_nr, &sc->trapno);
put_user_ex(current->thread.error_code, &sc->err);
put_user_ex(regs->ip, &sc->ip);
#ifdef CONFIG_X86_32
@@ -643,6 +634,16 @@ static int signr_convert(int sig)
#define is_ia32 0
#endif /* CONFIG_IA32_EMULATION */
+#ifdef CONFIG_X86_X32_ABI
+#define is_x32 test_thread_flag(TIF_X32)
+
+static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
+ siginfo_t *info, compat_sigset_t *set,
+ struct pt_regs *regs);
+#else /* !CONFIG_X86_X32_ABI */
+#define is_x32 0
+#endif /* CONFIG_X86_X32_ABI */
+
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct k_sigaction *ka,
@@ -667,8 +668,14 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
else
ret = ia32_setup_frame(usig, ka, set, regs);
- } else
+#ifdef CONFIG_X86_X32_ABI
+ } else if (is_x32) {
+ ret = x32_setup_rt_frame(usig, ka, info,
+ (compat_sigset_t *)set, regs);
+#endif
+ } else {
ret = __setup_rt_frame(sig, ka, info, set, regs);
+ }
if (ret) {
force_sigsegv(sig, current);
@@ -851,3 +858,102 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
force_sig(SIGSEGV, me);
}
+
+#ifdef CONFIG_X86_X32_ABI
+static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
+ siginfo_t *info, compat_sigset_t *set,
+ struct pt_regs *regs)
+{
+ struct rt_sigframe_x32 __user *frame;
+ void __user *restorer;
+ int err = 0;
+ void __user *fpstate = NULL;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ return -EFAULT;
+
+ if (ka->sa.sa_flags & SA_SIGINFO) {
+ if (copy_siginfo_to_user32(&frame->info, info))
+ return -EFAULT;
+ }
+
+ put_user_try {
+ /* Create the ucontext. */
+ if (cpu_has_xsave)
+ put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+ else
+ put_user_ex(0, &frame->uc.uc_flags);
+ put_user_ex(0, &frame->uc.uc_link);
+ put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ put_user_ex(sas_ss_flags(regs->sp),
+ &frame->uc.uc_stack.ss_flags);
+ put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ put_user_ex(0, &frame->uc.uc__pad0);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+ regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ restorer = ka->sa.sa_restorer;
+ } else {
+ /* could use a vstub here */
+ restorer = NULL;
+ err |= -EFAULT;
+ }
+ put_user_ex(restorer, &frame->pretcode);
+ } put_user_catch(err);
+
+ if (err)
+ return -EFAULT;
+
+ /* Set up registers for signal handler */
+ regs->sp = (unsigned long) frame;
+ regs->ip = (unsigned long) ka->sa.sa_handler;
+
+ /* We use the x32 calling convention here... */
+ regs->di = sig;
+ regs->si = (unsigned long) &frame->info;
+ regs->dx = (unsigned long) &frame->uc;
+
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
+
+ regs->cs = __USER_CS;
+ regs->ss = __USER_DS;
+
+ return 0;
+}
+
+asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe_x32 __user *frame;
+ sigset_t set;
+ unsigned long ax;
+ struct pt_regs tregs;
+
+ frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ set_current_blocked(&set);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
+ goto badframe;
+
+ tregs = *regs;
+ if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT)
+ goto badframe;
+
+ return ax;
+
+badframe:
+ signal_fault(regs, frame, "x32 rt_sigreturn");
+ return 0;
+}
+#endif
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5104a2b685cf..6e1e406038c2 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -50,6 +50,7 @@
#include <linux/tboot.h>
#include <linux/stackprotector.h>
#include <linux/gfp.h>
+#include <linux/cpuidle.h>
#include <asm/acpi.h>
#include <asm/desc.h>
@@ -219,14 +220,9 @@ static void __cpuinit smp_callin(void)
* Update loops_per_jiffy in cpu_data. Previous call to
* smp_store_cpu_info() stored a value that is close but not as
* accurate as the value just calculated.
- *
- * Need to enable IRQs because it can take longer and then
- * the NMI watchdog might kill us.
*/
- local_irq_enable();
calibrate_delay();
cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
- local_irq_disable();
pr_debug("Stack at about %p\n", &cpuid);
/*
@@ -1409,7 +1405,8 @@ void native_play_dead(void)
tboot_shutdown(TB_SHUTDOWN_WFS);
mwait_play_dead(); /* Only returns on failure */
- hlt_play_dead();
+ if (cpuidle_play_dead())
+ hlt_play_dead();
}
#else /* ... !CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index ef59642ff1bf..b4d3c3927dd8 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -98,7 +98,7 @@ out:
static void find_start_end(unsigned long flags, unsigned long *begin,
unsigned long *end)
{
- if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
+ if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
unsigned long new_begin;
/* This is usually used needed to map code in small
model, so it needs to be in the first 31bit. Limit
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
(!vma || addr + len <= vma->vm_start))
return addr;
}
- if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+ if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
&& len <= mm->cached_hole_size) {
mm->cached_hole_size = 0;
mm->free_area_cache = begin;
@@ -205,7 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return addr;
/* for MAP_32BIT mappings we force the legact mmap base */
- if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
+ if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
goto bottomup;
/* requesting a specific address */
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 7ac7943be02c..5c7f8c20da74 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -5,6 +5,14 @@
#include <linux/cache.h>
#include <asm/asm-offsets.h>
+#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+
+#ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+#else
+# define __SYSCALL_X32(nr, sym, compat) /* nothing */
+#endif
+
#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
#include <asm/syscalls_64.h>
#undef __SYSCALL_64
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index e2410e27f97e..6410744ac5cb 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -272,7 +272,7 @@ static void tboot_copy_fadt(const struct acpi_table_fadt *fadt)
offsetof(struct acpi_table_facs, firmware_waking_vector);
}
-void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
+static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
{
static u32 acpi_shutdown_map[ACPI_S_STATE_COUNT] = {
/* S0,1,2: */ -1, -1, -1,
@@ -281,7 +281,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
/* S5: */ TB_SHUTDOWN_S5 };
if (!tboot_enabled())
- return;
+ return 0;
tboot_copy_fadt(&acpi_gbl_FADT);
tboot->acpi_sinfo.pm1a_cnt_val = pm1a_control;
@@ -292,10 +292,11 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
if (sleep_state >= ACPI_S_STATE_COUNT ||
acpi_shutdown_map[sleep_state] == -1) {
pr_warning("unsupported sleep state 0x%x\n", sleep_state);
- return;
+ return -1;
}
tboot_shutdown(acpi_shutdown_map[sleep_state]);
+ return 0;
}
static atomic_t ap_wfs_count;
@@ -345,6 +346,8 @@ static __init int tboot_late_init(void)
atomic_set(&ap_wfs_count, 0);
register_hotcpu_notifier(&tboot_cpu_notifier);
+
+ acpi_os_set_prepare_sleep(&tboot_sleep);
return 0;
}
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 73920e4c6dc5..9d9d2f9e77a5 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -162,7 +162,7 @@ int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
{
const struct desc_struct *tls;
- if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
+ if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
(pos % sizeof(struct user_desc)) != 0 ||
(count % sizeof(struct user_desc)) != 0)
return -EINVAL;
@@ -197,7 +197,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
const struct user_desc *info;
- if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
+ if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
(pos % sizeof(struct user_desc)) != 0 ||
(count % sizeof(struct user_desc)) != 0)
return -EINVAL;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 860f126ca233..ff9281f16029 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -119,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
* traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
* On nmi (interrupt 2), do_trap should not be called.
*/
- if (trapnr < 6)
+ if (trapnr < X86_TRAP_UD)
goto vm86_trap;
goto trap_signal;
}
@@ -132,7 +132,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
trap_signal:
#endif
/*
- * We want error_code and trap_no set for userspace faults and
+ * We want error_code and trap_nr set for userspace faults and
* kernelspace faults which result in die(), but not
* kernelspace faults which are fixed up. die() gives the
* process no chance to handle the signal and notice the
@@ -141,7 +141,7 @@ trap_signal:
* delivered, faults. See also do_general_protection below.
*/
tsk->thread.error_code = error_code;
- tsk->thread.trap_no = trapnr;
+ tsk->thread.trap_nr = trapnr;
#ifdef CONFIG_X86_64
if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
@@ -164,7 +164,7 @@ trap_signal:
kernel_trap:
if (!fixup_exception(regs)) {
tsk->thread.error_code = error_code;
- tsk->thread.trap_no = trapnr;
+ tsk->thread.trap_nr = trapnr;
die(str, regs, error_code);
}
return;
@@ -203,27 +203,31 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
do_trap(trapnr, signr, str, regs, error_code, &info); \
}
-DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
-DO_ERROR(4, SIGSEGV, "overflow", overflow)
-DO_ERROR(5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
-DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
-DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
+ regs->ip)
+DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
+DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
+DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
+ regs->ip)
+DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
+ coprocessor_segment_overrun)
+DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
+DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
#endif
-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
+ BUS_ADRALN, 0)
#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
- 12, SIGBUS) == NOTIFY_STOP)
+ X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
return;
preempt_conditional_sti(regs);
- do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+ do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
preempt_conditional_cli(regs);
}
@@ -233,10 +237,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
struct task_struct *tsk = current;
/* Return not checked because double check cannot be ignored */
- notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
+ notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
tsk->thread.error_code = error_code;
- tsk->thread.trap_no = 8;
+ tsk->thread.trap_nr = X86_TRAP_DF;
/*
* This is always a kernel trap and never fixable (and thus must
@@ -264,7 +268,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
goto gp_in_kernel;
tsk->thread.error_code = error_code;
- tsk->thread.trap_no = 13;
+ tsk->thread.trap_nr = X86_TRAP_GP;
if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
printk_ratelimit()) {
@@ -291,9 +295,9 @@ gp_in_kernel:
return;
tsk->thread.error_code = error_code;
- tsk->thread.trap_no = 13;
- if (notify_die(DIE_GPF, "general protection fault", regs,
- error_code, 13, SIGSEGV) == NOTIFY_STOP)
+ tsk->thread.trap_nr = X86_TRAP_GP;
+ if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
+ X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
return;
die("general protection fault", regs, error_code);
}
@@ -302,13 +306,13 @@ gp_in_kernel:
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
- if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
- == NOTIFY_STOP)
+ if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
+ SIGTRAP) == NOTIFY_STOP)
return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
- if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
- == NOTIFY_STOP)
+ if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
+ SIGTRAP) == NOTIFY_STOP)
return;
/*
@@ -317,7 +321,7 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
*/
debug_stack_usage_inc();
preempt_conditional_sti(regs);
- do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
+ do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
preempt_conditional_cli(regs);
debug_stack_usage_dec();
}
@@ -422,8 +426,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
preempt_conditional_sti(regs);
if (regs->flags & X86_VM_MASK) {
- handle_vm86_trap((struct kernel_vm86_regs *) regs,
- error_code, 1);
+ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
+ X86_TRAP_DB);
preempt_conditional_cli(regs);
debug_stack_usage_dec();
return;
@@ -460,7 +464,8 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
struct task_struct *task = current;
siginfo_t info;
unsigned short err;
- char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
+ char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
+ "simd exception";
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
return;
@@ -470,7 +475,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
if (!fixup_exception(regs)) {
task->thread.error_code = error_code;
- task->thread.trap_no = trapnr;
+ task->thread.trap_nr = trapnr;
die(str, regs, error_code);
}
return;
@@ -480,12 +485,12 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
* Save the info for the exception handler and clear the error.
*/
save_init_fpu(task);
- task->thread.trap_no = trapnr;
+ task->thread.trap_nr = trapnr;
task->thread.error_code = error_code;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *)regs->ip;
- if (trapnr == 16) {
+ if (trapnr == X86_TRAP_MF) {
unsigned short cwd, swd;
/*
* (~cwd & swd) will mask out exceptions that are not set to unmasked
@@ -529,10 +534,11 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
info.si_code = FPE_FLTRES;
} else {
/*
- * If we're using IRQ 13, or supposedly even some trap 16
- * implementations, it's possible we get a spurious trap...
+ * If we're using IRQ 13, or supposedly even some trap
+ * X86_TRAP_MF implementations, it's possible
+ * we get a spurious trap, which is not an error.
*/
- return; /* Spurious trap, no error */
+ return;
}
force_sig_info(SIGFPE, &info, task);
}
@@ -543,13 +549,13 @@ dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
ignore_fpu_irq = 1;
#endif
- math_error(regs, error_code, 16);
+ math_error(regs, error_code, X86_TRAP_MF);
}
dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
- math_error(regs, error_code, 19);
+ math_error(regs, error_code, X86_TRAP_XF);
}
dotraplinkage void
@@ -643,20 +649,21 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
info.si_errno = 0;
info.si_code = ILL_BADSTK;
info.si_addr = NULL;
- if (notify_die(DIE_TRAP, "iret exception",
- regs, error_code, 32, SIGILL) == NOTIFY_STOP)
+ if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
+ X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
return;
- do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
+ do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
+ &info);
}
#endif
/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
- set_intr_gate_ist(1, &debug, DEBUG_STACK);
+ set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
/* int3 can be called from all */
- set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
- set_intr_gate(14, &page_fault);
+ set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
+ set_intr_gate(X86_TRAP_PF, &page_fault);
load_idt(&idt_descr);
}
@@ -672,30 +679,30 @@ void __init trap_init(void)
early_iounmap(p, 4);
#endif
- set_intr_gate(0, &divide_error);
- set_intr_gate_ist(2, &nmi, NMI_STACK);
+ set_intr_gate(X86_TRAP_DE, &divide_error);
+ set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
/* int4 can be called from all */
- set_system_intr_gate(4, &overflow);
- set_intr_gate(5, &bounds);
- set_intr_gate(6, &invalid_op);
- set_intr_gate(7, &device_not_available);
+ set_system_intr_gate(X86_TRAP_OF, &overflow);
+ set_intr_gate(X86_TRAP_BR, &bounds);
+ set_intr_gate(X86_TRAP_UD, &invalid_op);
+ set_intr_gate(X86_TRAP_NM, &device_not_available);
#ifdef CONFIG_X86_32
- set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
+ set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
- set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
+ set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
- set_intr_gate(9, &coprocessor_segment_overrun);
- set_intr_gate(10, &invalid_TSS);
- set_intr_gate(11, &segment_not_present);
- set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
- set_intr_gate(13, &general_protection);
- set_intr_gate(15, &spurious_interrupt_bug);
- set_intr_gate(16, &coprocessor_error);
- set_intr_gate(17, &alignment_check);
+ set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
+ set_intr_gate(X86_TRAP_TS, &invalid_TSS);
+ set_intr_gate(X86_TRAP_NP, &segment_not_present);
+ set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
+ set_intr_gate(X86_TRAP_GP, &general_protection);
+ set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
+ set_intr_gate(X86_TRAP_MF, &coprocessor_error);
+ set_intr_gate(X86_TRAP_AC, &alignment_check);
#ifdef CONFIG_X86_MCE
- set_intr_gate_ist(18, &machine_check, MCE_STACK);
+ set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
- set_intr_gate(19, &simd_coprocessor_error);
+ set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);
/* Reserve all the builtin and the syscall vector: */
for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
@@ -720,7 +727,7 @@ void __init trap_init(void)
#ifdef CONFIG_X86_64
memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
- set_nmi_gate(1, &debug);
- set_nmi_gate(3, &int3);
+ set_nmi_gate(X86_TRAP_DB, &debug);
+ set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 899a03f2d181..fc0a147e3727 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -933,6 +933,16 @@ static int __init init_tsc_clocksource(void)
clocksource_tsc.rating = 0;
clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
}
+
+ /*
+ * Trust the results of the earlier calibration on systems
+ * exporting a reliable TSC.
+ */
+ if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+ clocksource_register_khz(&clocksource_tsc, tsc_khz);
+ return 0;
+ }
+
schedule_delayed_work(&tsc_irqwork, 0);
return 0;
}
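
The hunk above registers the TSC clocksource right away when the CPU advertises X86_FEATURE_TSC_RELIABLE, instead of waiting for the deferred refinement work. A quick way to confirm which clocksource ended up active is the clocksource sysfs node; a minimal userspace sketch (path as exported by the kernel's clocksource core):

    #include <stdio.h>

    int main(void)
    {
        char buf[64];
        FILE *f = fopen("/sys/devices/system/clocksource/clocksource0/"
                        "current_clocksource", "r");

        if (!f) {
            perror("current_clocksource");
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("active clocksource: %s", buf);  /* e.g. "tsc" or "hpet" */
        fclose(f);
        return 0;
    }
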
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 328cb37bb827..255f58ae71e8 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -569,7 +569,7 @@ int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
}
if (trapno != 1)
return 1; /* we let this be handled by the calling routine */
- current->thread.trap_no = trapno;
+ current->thread.trap_nr = trapno;
current->thread.error_code = error_code;
force_sig(SIGTRAP, current);
return 0;
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index b07ba9393564..f386dc49f988 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -52,10 +52,7 @@
#include "vsyscall_trace.h"
DEFINE_VVAR(int, vgetcpu_mode);
-DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
-{
- .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-};
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
@@ -80,20 +77,15 @@ early_param("vsyscall", vsyscall_setup);
void update_vsyscall_tz(void)
{
- unsigned long flags;
-
- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
- /* sys_tz has changed */
vsyscall_gtod_data.sys_tz = sys_tz;
- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
struct clocksource *clock, u32 mult)
{
- unsigned long flags;
+ struct timespec monotonic;
- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ write_seqcount_begin(&vsyscall_gtod_data.seq);
/* copy vsyscall data */
vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
@@ -101,12 +93,19 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
vsyscall_gtod_data.clock.mask = clock->mask;
vsyscall_gtod_data.clock.mult = mult;
vsyscall_gtod_data.clock.shift = clock->shift;
+
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
- vsyscall_gtod_data.wall_to_monotonic = *wtm;
+
+ monotonic = timespec_add(*wall_time, *wtm);
+ vsyscall_gtod_data.monotonic_time_sec = monotonic.tv_sec;
+ vsyscall_gtod_data.monotonic_time_nsec = monotonic.tv_nsec;
+
vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
+ vsyscall_gtod_data.monotonic_time_coarse =
+ timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);
- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+ write_seqcount_end(&vsyscall_gtod_data.seq);
}
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
@@ -153,7 +152,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
thread->error_code = 6; /* user fault, no page, write */
thread->cr2 = ptr;
- thread->trap_no = 14;
+ thread->trap_nr = X86_TRAP_PF;
memset(&info, 0, sizeof(info));
info.si_signo = SIGSEGV;
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 7718541541d4..9b868124128d 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -28,6 +28,7 @@
#include <linux/regset.h>
#include <asm/uaccess.h>
+#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/user.h>
#include <asm/i387.h>
@@ -269,7 +270,7 @@ void math_emulate(struct math_emu_info *info)
FPU_EIP = FPU_ORIG_EIP; /* Point to current FPU instruction. */
RE_ENTRANT_CHECK_OFF;
- current->thread.trap_no = 16;
+ current->thread.trap_nr = X86_TRAP_MF;
current->thread.error_code = 0;
send_sig(SIGFPE, current, 1);
return;
@@ -662,7 +663,7 @@ static int valid_prefix(u_char *Byte, u_char __user **fpu_eip,
void math_abort(struct math_emu_info *info, unsigned int signal)
{
FPU_EIP = FPU_ORIG_EIP;
- current->thread.trap_no = 16;
+ current->thread.trap_nr = X86_TRAP_MF;
current->thread.error_code = 0;
send_sig(signal, current, 1);
RE_ENTRANT_CHECK_OFF;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f0b4caf85c1a..3ecfd1aaf214 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -615,7 +615,7 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code,
dump_pagetable(address);
tsk->thread.cr2 = address;
- tsk->thread.trap_no = 14;
+ tsk->thread.trap_nr = X86_TRAP_PF;
tsk->thread.error_code = error_code;
if (__die("Bad pagetable", regs, error_code))
@@ -636,7 +636,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
if (current_thread_info()->sig_on_uaccess_error && signal) {
- tsk->thread.trap_no = 14;
+ tsk->thread.trap_nr = X86_TRAP_PF;
tsk->thread.error_code = error_code | PF_USER;
tsk->thread.cr2 = address;
@@ -676,7 +676,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
tsk->thread.cr2 = address;
- tsk->thread.trap_no = 14;
+ tsk->thread.trap_nr = X86_TRAP_PF;
tsk->thread.error_code = error_code;
sig = SIGKILL;
@@ -754,7 +754,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
/* Kernel addresses are always protection faults: */
tsk->thread.cr2 = address;
tsk->thread.error_code = error_code | (address >= TASK_SIZE);
- tsk->thread.trap_no = 14;
+ tsk->thread.trap_nr = X86_TRAP_PF;
force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
@@ -838,7 +838,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
tsk->thread.cr2 = address;
tsk->thread.error_code = error_code;
- tsk->thread.trap_no = 14;
+ tsk->thread.trap_nr = X86_TRAP_PF;
#ifdef CONFIG_MEMORY_FAILURE
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 1c1c4f46a7c1..efb5b4b93711 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -70,7 +70,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
return;
pxm = pa->proximity_domain;
apic_id = pa->apic_id;
- if (!cpu_has_x2apic && (apic_id >= 0xff)) {
+ if (!apic->apic_id_valid(apic_id)) {
printk(KERN_INFO "SRAT: PXM %u -> X2APIC 0x%04x ignored\n",
pxm, apic_id);
return;
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index bff89dfe3619..d6aa6e8315d1 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -67,7 +67,7 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
struct stack_frame_ia32 *head;
- /* User process is 32-bit */
+ /* User process is IA32 */
if (!current || !test_thread_flag(TIF_IA32))
return 0;
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
index 7cce722667b8..a4bee53c2e54 100644
--- a/arch/x86/platform/olpc/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
@@ -20,6 +20,8 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
+#include <linux/debugfs.h>
+#include <linux/mutex.h>
#include <asm/geode.h>
#include <asm/setup.h>
@@ -31,6 +33,15 @@ EXPORT_SYMBOL_GPL(olpc_platform_info);
static DEFINE_SPINLOCK(ec_lock);
+/* debugfs interface to EC commands */
+#define EC_MAX_CMD_ARGS (5 + 1) /* cmd byte + 5 args */
+#define EC_MAX_CMD_REPLY (8)
+
+static struct dentry *ec_debugfs_dir;
+static DEFINE_MUTEX(ec_debugfs_cmd_lock);
+static unsigned char ec_debugfs_resp[EC_MAX_CMD_REPLY];
+static unsigned int ec_debugfs_resp_bytes;
+
/* EC event mask to be applied during suspend (defining wakeup sources). */
static u16 ec_wakeup_mask;
@@ -269,6 +280,91 @@ int olpc_ec_sci_query(u16 *sci_value)
}
EXPORT_SYMBOL_GPL(olpc_ec_sci_query);
+static ssize_t ec_debugfs_cmd_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ int i, m;
+ unsigned char ec_cmd[EC_MAX_CMD_ARGS];
+ unsigned int ec_cmd_int[EC_MAX_CMD_ARGS];
+ char cmdbuf[64];
+ int ec_cmd_bytes;
+
+ mutex_lock(&ec_debugfs_cmd_lock);
+
+ size = simple_write_to_buffer(cmdbuf, sizeof(cmdbuf), ppos, buf, size);
+
+ m = sscanf(cmdbuf, "%x:%u %x %x %x %x %x", &ec_cmd_int[0],
+ &ec_debugfs_resp_bytes,
+ &ec_cmd_int[1], &ec_cmd_int[2], &ec_cmd_int[3],
+ &ec_cmd_int[4], &ec_cmd_int[5]);
+ if (m < 2 || ec_debugfs_resp_bytes > EC_MAX_CMD_REPLY) {
+ /* reset to prevent overflow on read */
+ ec_debugfs_resp_bytes = 0;
+
+ printk(KERN_DEBUG "olpc-ec: bad ec cmd: "
+ "cmd:response-count [arg1 [arg2 ...]]\n");
+ size = -EINVAL;
+ goto out;
+ }
+
+ /* convert scanf'd ints to char */
+ ec_cmd_bytes = m - 2;
+ for (i = 0; i <= ec_cmd_bytes; i++)
+ ec_cmd[i] = ec_cmd_int[i];
+
+ printk(KERN_DEBUG "olpc-ec: debugfs cmd 0x%02x with %d args "
+ "%02x %02x %02x %02x %02x, want %d returns\n",
+ ec_cmd[0], ec_cmd_bytes, ec_cmd[1], ec_cmd[2], ec_cmd[3],
+ ec_cmd[4], ec_cmd[5], ec_debugfs_resp_bytes);
+
+ olpc_ec_cmd(ec_cmd[0], (ec_cmd_bytes == 0) ? NULL : &ec_cmd[1],
+ ec_cmd_bytes, ec_debugfs_resp, ec_debugfs_resp_bytes);
+
+ printk(KERN_DEBUG "olpc-ec: response "
+ "%02x %02x %02x %02x %02x %02x %02x %02x (%d bytes expected)\n",
+ ec_debugfs_resp[0], ec_debugfs_resp[1], ec_debugfs_resp[2],
+ ec_debugfs_resp[3], ec_debugfs_resp[4], ec_debugfs_resp[5],
+ ec_debugfs_resp[6], ec_debugfs_resp[7], ec_debugfs_resp_bytes);
+
+out:
+ mutex_unlock(&ec_debugfs_cmd_lock);
+ return size;
+}
+
+static ssize_t ec_debugfs_cmd_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ unsigned int i, r;
+ char *rp;
+ char respbuf[64];
+
+ mutex_lock(&ec_debugfs_cmd_lock);
+ rp = respbuf;
+ rp += sprintf(rp, "%02x", ec_debugfs_resp[0]);
+ for (i = 1; i < ec_debugfs_resp_bytes; i++)
+ rp += sprintf(rp, ", %02x", ec_debugfs_resp[i]);
+ mutex_unlock(&ec_debugfs_cmd_lock);
+ rp += sprintf(rp, "\n");
+
+ r = rp - respbuf;
+ return simple_read_from_buffer(buf, size, ppos, respbuf, r);
+}
+
+static const struct file_operations ec_debugfs_genops = {
+ .write = ec_debugfs_cmd_write,
+ .read = ec_debugfs_cmd_read,
+};
+
+static void setup_debugfs(void)
+{
+ ec_debugfs_dir = debugfs_create_dir("olpc-ec", 0);
+ if (ec_debugfs_dir == ERR_PTR(-ENODEV))
+ return;
+
+ debugfs_create_file("cmd", 0600, ec_debugfs_dir, NULL,
+ &ec_debugfs_genops);
+}
+
static int olpc_ec_suspend(void)
{
return olpc_ec_mask_write(ec_wakeup_mask);
@@ -372,6 +468,7 @@ static int __init olpc_init(void)
}
register_syscore_ops(&olpc_syscore_ops);
+ setup_debugfs();
return 0;
}
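
The new debugfs file accepts a line of the form "cmd:response-count [arg1 [arg2 ...]]" (command and argument bytes in hex), issues the EC command, and returns the reply bytes as comma-separated hex on read. A minimal sketch of that round trip; the 0x1f command byte is a placeholder, not a documented EC command:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static const char path[] = "/sys/kernel/debug/olpc-ec/cmd";

    int main(void)
    {
        /* "1f" is a placeholder command byte; ":1" asks for one reply byte. */
        const char *request = "1f:1\n";
        char reply[64];
        ssize_t n;
        int fd;

        fd = open(path, O_WRONLY);
        if (fd < 0 || write(fd, request, strlen(request)) < 0) {
            perror(path);
            return 1;
        }
        close(fd);

        fd = open(path, O_RDONLY);          /* reopen so the read starts at 0 */
        if (fd < 0) {
            perror(path);
            return 1;
        }
        n = read(fd, reply, sizeof(reply) - 1);
        if (n > 0) {
            reply[n] = '\0';
            printf("EC reply: %s", reply);  /* hex bytes, comma separated */
        }
        close(fd);
        return 0;
    }
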
diff --git a/arch/x86/syscalls/Makefile b/arch/x86/syscalls/Makefile
index 564b2476fede..3236aebc828d 100644
--- a/arch/x86/syscalls/Makefile
+++ b/arch/x86/syscalls/Makefile
@@ -10,8 +10,10 @@ syshdr := $(srctree)/$(src)/syscallhdr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' $< $@ \
- $(syshdr_abi_$(basetarget)) $(syshdr_pfx_$(basetarget))
+ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+ '$(syshdr_abi_$(basetarget))' \
+ '$(syshdr_pfx_$(basetarget))' \
+ '$(syshdr_offset_$(basetarget))'
quiet_cmd_systbl = SYSTBL $@
cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@
@@ -24,18 +26,28 @@ syshdr_pfx_unistd_32_ia32 := ia32_
$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr)
$(call if_changed,syshdr)
-syshdr_abi_unistd_64 := 64
+syshdr_abi_unistd_x32 := common,x32
+syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT
+$(out)/unistd_x32.h: $(syscall64) $(syshdr)
+ $(call if_changed,syshdr)
+
+syshdr_abi_unistd_64 := common,64
$(out)/unistd_64.h: $(syscall64) $(syshdr)
$(call if_changed,syshdr)
+syshdr_abi_unistd_64_x32 := x32
+syshdr_pfx_unistd_64_x32 := x32_
+$(out)/unistd_64_x32.h: $(syscall64) $(syshdr)
+ $(call if_changed,syshdr)
+
$(out)/syscalls_32.h: $(syscall32) $(systbl)
$(call if_changed,systbl)
$(out)/syscalls_64.h: $(syscall64) $(systbl)
$(call if_changed,systbl)
-syshdr-y += unistd_32.h unistd_64.h
+syshdr-y += unistd_32.h unistd_64.h unistd_x32.h
syshdr-y += syscalls_32.h
-syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h
+syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h
syshdr-$(CONFIG_X86_64) += syscalls_64.h
targets += $(syshdr-y)
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index e7e67cc3c14b..29f9f0554f7d 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -181,7 +181,7 @@
172 i386 prctl sys_prctl
173 i386 rt_sigreturn ptregs_rt_sigreturn stub32_rt_sigreturn
174 i386 rt_sigaction sys_rt_sigaction sys32_rt_sigaction
-175 i386 rt_sigprocmask sys_rt_sigprocmask sys32_rt_sigprocmask
+175 i386 rt_sigprocmask sys_rt_sigprocmask
176 i386 rt_sigpending sys_rt_sigpending sys32_rt_sigpending
177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo sys32_rt_sigqueueinfo
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index b440a8f7eefa..dd29a9ea27c5 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -4,317 +4,350 @@
# The format is:
# <number> <abi> <name> <entry point>
#
-# The abi is always "64" for this file (for now.)
+# The abi is "common", "64" or "x32" for this file.
#
-0 64 read sys_read
-1 64 write sys_write
-2 64 open sys_open
-3 64 close sys_close
-4 64 stat sys_newstat
-5 64 fstat sys_newfstat
-6 64 lstat sys_newlstat
-7 64 poll sys_poll
-8 64 lseek sys_lseek
-9 64 mmap sys_mmap
-10 64 mprotect sys_mprotect
-11 64 munmap sys_munmap
-12 64 brk sys_brk
+0 common read sys_read
+1 common write sys_write
+2 common open sys_open
+3 common close sys_close
+4 common stat sys_newstat
+5 common fstat sys_newfstat
+6 common lstat sys_newlstat
+7 common poll sys_poll
+8 common lseek sys_lseek
+9 common mmap sys_mmap
+10 common mprotect sys_mprotect
+11 common munmap sys_munmap
+12 common brk sys_brk
13 64 rt_sigaction sys_rt_sigaction
-14 64 rt_sigprocmask sys_rt_sigprocmask
+14 common rt_sigprocmask sys_rt_sigprocmask
15 64 rt_sigreturn stub_rt_sigreturn
16 64 ioctl sys_ioctl
-17 64 pread64 sys_pread64
-18 64 pwrite64 sys_pwrite64
+17 common pread64 sys_pread64
+18 common pwrite64 sys_pwrite64
19 64 readv sys_readv
20 64 writev sys_writev
-21 64 access sys_access
-22 64 pipe sys_pipe
-23 64 select sys_select
-24 64 sched_yield sys_sched_yield
-25 64 mremap sys_mremap
-26 64 msync sys_msync
-27 64 mincore sys_mincore
-28 64 madvise sys_madvise
-29 64 shmget sys_shmget
-30 64 shmat sys_shmat
-31 64 shmctl sys_shmctl
-32 64 dup sys_dup
-33 64 dup2 sys_dup2
-34 64 pause sys_pause
-35 64 nanosleep sys_nanosleep
-36 64 getitimer sys_getitimer
-37 64 alarm sys_alarm
-38 64 setitimer sys_setitimer
-39 64 getpid sys_getpid
-40 64 sendfile sys_sendfile64
-41 64 socket sys_socket
-42 64 connect sys_connect
-43 64 accept sys_accept
-44 64 sendto sys_sendto
+21 common access sys_access
+22 common pipe sys_pipe
+23 common select sys_select
+24 common sched_yield sys_sched_yield
+25 common mremap sys_mremap
+26 common msync sys_msync
+27 common mincore sys_mincore
+28 common madvise sys_madvise
+29 common shmget sys_shmget
+30 common shmat sys_shmat
+31 common shmctl sys_shmctl
+32 common dup sys_dup
+33 common dup2 sys_dup2
+34 common pause sys_pause
+35 common nanosleep sys_nanosleep
+36 common getitimer sys_getitimer
+37 common alarm sys_alarm
+38 common setitimer sys_setitimer
+39 common getpid sys_getpid
+40 common sendfile sys_sendfile64
+41 common socket sys_socket
+42 common connect sys_connect
+43 common accept sys_accept
+44 common sendto sys_sendto
45 64 recvfrom sys_recvfrom
46 64 sendmsg sys_sendmsg
47 64 recvmsg sys_recvmsg
-48 64 shutdown sys_shutdown
-49 64 bind sys_bind
-50 64 listen sys_listen
-51 64 getsockname sys_getsockname
-52 64 getpeername sys_getpeername
-53 64 socketpair sys_socketpair
-54 64 setsockopt sys_setsockopt
-55 64 getsockopt sys_getsockopt
-56 64 clone stub_clone
-57 64 fork stub_fork
-58 64 vfork stub_vfork
+48 common shutdown sys_shutdown
+49 common bind sys_bind
+50 common listen sys_listen
+51 common getsockname sys_getsockname
+52 common getpeername sys_getpeername
+53 common socketpair sys_socketpair
+54 common setsockopt sys_setsockopt
+55 common getsockopt sys_getsockopt
+56 common clone stub_clone
+57 common fork stub_fork
+58 common vfork stub_vfork
59 64 execve stub_execve
-60 64 exit sys_exit
-61 64 wait4 sys_wait4
-62 64 kill sys_kill
-63 64 uname sys_newuname
-64 64 semget sys_semget
-65 64 semop sys_semop
-66 64 semctl sys_semctl
-67 64 shmdt sys_shmdt
-68 64 msgget sys_msgget
-69 64 msgsnd sys_msgsnd
-70 64 msgrcv sys_msgrcv
-71 64 msgctl sys_msgctl
-72 64 fcntl sys_fcntl
-73 64 flock sys_flock
-74 64 fsync sys_fsync
-75 64 fdatasync sys_fdatasync
-76 64 truncate sys_truncate
-77 64 ftruncate sys_ftruncate
-78 64 getdents sys_getdents
-79 64 getcwd sys_getcwd
-80 64 chdir sys_chdir
-81 64 fchdir sys_fchdir
-82 64 rename sys_rename
-83 64 mkdir sys_mkdir
-84 64 rmdir sys_rmdir
-85 64 creat sys_creat
-86 64 link sys_link
-87 64 unlink sys_unlink
-88 64 symlink sys_symlink
-89 64 readlink sys_readlink
-90 64 chmod sys_chmod
-91 64 fchmod sys_fchmod
-92 64 chown sys_chown
-93 64 fchown sys_fchown
-94 64 lchown sys_lchown
-95 64 umask sys_umask
-96 64 gettimeofday sys_gettimeofday
-97 64 getrlimit sys_getrlimit
-98 64 getrusage sys_getrusage
-99 64 sysinfo sys_sysinfo
-100 64 times sys_times
+60 common exit sys_exit
+61 common wait4 sys_wait4
+62 common kill sys_kill
+63 common uname sys_newuname
+64 common semget sys_semget
+65 common semop sys_semop
+66 common semctl sys_semctl
+67 common shmdt sys_shmdt
+68 common msgget sys_msgget
+69 common msgsnd sys_msgsnd
+70 common msgrcv sys_msgrcv
+71 common msgctl sys_msgctl
+72 common fcntl sys_fcntl
+73 common flock sys_flock
+74 common fsync sys_fsync
+75 common fdatasync sys_fdatasync
+76 common truncate sys_truncate
+77 common ftruncate sys_ftruncate
+78 common getdents sys_getdents
+79 common getcwd sys_getcwd
+80 common chdir sys_chdir
+81 common fchdir sys_fchdir
+82 common rename sys_rename
+83 common mkdir sys_mkdir
+84 common rmdir sys_rmdir
+85 common creat sys_creat
+86 common link sys_link
+87 common unlink sys_unlink
+88 common symlink sys_symlink
+89 common readlink sys_readlink
+90 common chmod sys_chmod
+91 common fchmod sys_fchmod
+92 common chown sys_chown
+93 common fchown sys_fchown
+94 common lchown sys_lchown
+95 common umask sys_umask
+96 common gettimeofday sys_gettimeofday
+97 common getrlimit sys_getrlimit
+98 common getrusage sys_getrusage
+99 common sysinfo sys_sysinfo
+100 common times sys_times
101 64 ptrace sys_ptrace
-102 64 getuid sys_getuid
-103 64 syslog sys_syslog
-104 64 getgid sys_getgid
-105 64 setuid sys_setuid
-106 64 setgid sys_setgid
-107 64 geteuid sys_geteuid
-108 64 getegid sys_getegid
-109 64 setpgid sys_setpgid
-110 64 getppid sys_getppid
-111 64 getpgrp sys_getpgrp
-112 64 setsid sys_setsid
-113 64 setreuid sys_setreuid
-114 64 setregid sys_setregid
-115 64 getgroups sys_getgroups
-116 64 setgroups sys_setgroups
-117 64 setresuid sys_setresuid
-118 64 getresuid sys_getresuid
-119 64 setresgid sys_setresgid
-120 64 getresgid sys_getresgid
-121 64 getpgid sys_getpgid
-122 64 setfsuid sys_setfsuid
-123 64 setfsgid sys_setfsgid
-124 64 getsid sys_getsid
-125 64 capget sys_capget
-126 64 capset sys_capset
+102 common getuid sys_getuid
+103 common syslog sys_syslog
+104 common getgid sys_getgid
+105 common setuid sys_setuid
+106 common setgid sys_setgid
+107 common geteuid sys_geteuid
+108 common getegid sys_getegid
+109 common setpgid sys_setpgid
+110 common getppid sys_getppid
+111 common getpgrp sys_getpgrp
+112 common setsid sys_setsid
+113 common setreuid sys_setreuid
+114 common setregid sys_setregid
+115 common getgroups sys_getgroups
+116 common setgroups sys_setgroups
+117 common setresuid sys_setresuid
+118 common getresuid sys_getresuid
+119 common setresgid sys_setresgid
+120 common getresgid sys_getresgid
+121 common getpgid sys_getpgid
+122 common setfsuid sys_setfsuid
+123 common setfsgid sys_setfsgid
+124 common getsid sys_getsid
+125 common capget sys_capget
+126 common capset sys_capset
127 64 rt_sigpending sys_rt_sigpending
128 64 rt_sigtimedwait sys_rt_sigtimedwait
129 64 rt_sigqueueinfo sys_rt_sigqueueinfo
-130 64 rt_sigsuspend sys_rt_sigsuspend
+130 common rt_sigsuspend sys_rt_sigsuspend
131 64 sigaltstack stub_sigaltstack
-132 64 utime sys_utime
-133 64 mknod sys_mknod
+132 common utime sys_utime
+133 common mknod sys_mknod
134 64 uselib
-135 64 personality sys_personality
-136 64 ustat sys_ustat
-137 64 statfs sys_statfs
-138 64 fstatfs sys_fstatfs
-139 64 sysfs sys_sysfs
-140 64 getpriority sys_getpriority
-141 64 setpriority sys_setpriority
-142 64 sched_setparam sys_sched_setparam
-143 64 sched_getparam sys_sched_getparam
-144 64 sched_setscheduler sys_sched_setscheduler
-145 64 sched_getscheduler sys_sched_getscheduler
-146 64 sched_get_priority_max sys_sched_get_priority_max
-147 64 sched_get_priority_min sys_sched_get_priority_min
-148 64 sched_rr_get_interval sys_sched_rr_get_interval
-149 64 mlock sys_mlock
-150 64 munlock sys_munlock
-151 64 mlockall sys_mlockall
-152 64 munlockall sys_munlockall
-153 64 vhangup sys_vhangup
-154 64 modify_ldt sys_modify_ldt
-155 64 pivot_root sys_pivot_root
+135 common personality sys_personality
+136 common ustat sys_ustat
+137 common statfs sys_statfs
+138 common fstatfs sys_fstatfs
+139 common sysfs sys_sysfs
+140 common getpriority sys_getpriority
+141 common setpriority sys_setpriority
+142 common sched_setparam sys_sched_setparam
+143 common sched_getparam sys_sched_getparam
+144 common sched_setscheduler sys_sched_setscheduler
+145 common sched_getscheduler sys_sched_getscheduler
+146 common sched_get_priority_max sys_sched_get_priority_max
+147 common sched_get_priority_min sys_sched_get_priority_min
+148 common sched_rr_get_interval sys_sched_rr_get_interval
+149 common mlock sys_mlock
+150 common munlock sys_munlock
+151 common mlockall sys_mlockall
+152 common munlockall sys_munlockall
+153 common vhangup sys_vhangup
+154 common modify_ldt sys_modify_ldt
+155 common pivot_root sys_pivot_root
156 64 _sysctl sys_sysctl
-157 64 prctl sys_prctl
-158 64 arch_prctl sys_arch_prctl
-159 64 adjtimex sys_adjtimex
-160 64 setrlimit sys_setrlimit
-161 64 chroot sys_chroot
-162 64 sync sys_sync
-163 64 acct sys_acct
-164 64 settimeofday sys_settimeofday
-165 64 mount sys_mount
-166 64 umount2 sys_umount
-167 64 swapon sys_swapon
-168 64 swapoff sys_swapoff
-169 64 reboot sys_reboot
-170 64 sethostname sys_sethostname
-171 64 setdomainname sys_setdomainname
-172 64 iopl stub_iopl
-173 64 ioperm sys_ioperm
+157 common prctl sys_prctl
+158 common arch_prctl sys_arch_prctl
+159 common adjtimex sys_adjtimex
+160 common setrlimit sys_setrlimit
+161 common chroot sys_chroot
+162 common sync sys_sync
+163 common acct sys_acct
+164 common settimeofday sys_settimeofday
+165 common mount sys_mount
+166 common umount2 sys_umount
+167 common swapon sys_swapon
+168 common swapoff sys_swapoff
+169 common reboot sys_reboot
+170 common sethostname sys_sethostname
+171 common setdomainname sys_setdomainname
+172 common iopl stub_iopl
+173 common ioperm sys_ioperm
174 64 create_module
-175 64 init_module sys_init_module
-176 64 delete_module sys_delete_module
+175 common init_module sys_init_module
+176 common delete_module sys_delete_module
177 64 get_kernel_syms
178 64 query_module
-179 64 quotactl sys_quotactl
+179 common quotactl sys_quotactl
180 64 nfsservctl
-181 64 getpmsg
-182 64 putpmsg
-183 64 afs_syscall
-184 64 tuxcall
-185 64 security
-186 64 gettid sys_gettid
-187 64 readahead sys_readahead
-188 64 setxattr sys_setxattr
-189 64 lsetxattr sys_lsetxattr
-190 64 fsetxattr sys_fsetxattr
-191 64 getxattr sys_getxattr
-192 64 lgetxattr sys_lgetxattr
-193 64 fgetxattr sys_fgetxattr
-194 64 listxattr sys_listxattr
-195 64 llistxattr sys_llistxattr
-196 64 flistxattr sys_flistxattr
-197 64 removexattr sys_removexattr
-198 64 lremovexattr sys_lremovexattr
-199 64 fremovexattr sys_fremovexattr
-200 64 tkill sys_tkill
-201 64 time sys_time
-202 64 futex sys_futex
-203 64 sched_setaffinity sys_sched_setaffinity
-204 64 sched_getaffinity sys_sched_getaffinity
+181 common getpmsg
+182 common putpmsg
+183 common afs_syscall
+184 common tuxcall
+185 common security
+186 common gettid sys_gettid
+187 common readahead sys_readahead
+188 common setxattr sys_setxattr
+189 common lsetxattr sys_lsetxattr
+190 common fsetxattr sys_fsetxattr
+191 common getxattr sys_getxattr
+192 common lgetxattr sys_lgetxattr
+193 common fgetxattr sys_fgetxattr
+194 common listxattr sys_listxattr
+195 common llistxattr sys_llistxattr
+196 common flistxattr sys_flistxattr
+197 common removexattr sys_removexattr
+198 common lremovexattr sys_lremovexattr
+199 common fremovexattr sys_fremovexattr
+200 common tkill sys_tkill
+201 common time sys_time
+202 common futex sys_futex
+203 common sched_setaffinity sys_sched_setaffinity
+204 common sched_getaffinity sys_sched_getaffinity
205 64 set_thread_area
-206 64 io_setup sys_io_setup
-207 64 io_destroy sys_io_destroy
-208 64 io_getevents sys_io_getevents
-209 64 io_submit sys_io_submit
-210 64 io_cancel sys_io_cancel
+206 common io_setup sys_io_setup
+207 common io_destroy sys_io_destroy
+208 common io_getevents sys_io_getevents
+209 common io_submit sys_io_submit
+210 common io_cancel sys_io_cancel
211 64 get_thread_area
-212 64 lookup_dcookie sys_lookup_dcookie
-213 64 epoll_create sys_epoll_create
+212 common lookup_dcookie sys_lookup_dcookie
+213 common epoll_create sys_epoll_create
214 64 epoll_ctl_old
215 64 epoll_wait_old
-216 64 remap_file_pages sys_remap_file_pages
-217 64 getdents64 sys_getdents64
-218 64 set_tid_address sys_set_tid_address
-219 64 restart_syscall sys_restart_syscall
-220 64 semtimedop sys_semtimedop
-221 64 fadvise64 sys_fadvise64
+216 common remap_file_pages sys_remap_file_pages
+217 common getdents64 sys_getdents64
+218 common set_tid_address sys_set_tid_address
+219 common restart_syscall sys_restart_syscall
+220 common semtimedop sys_semtimedop
+221 common fadvise64 sys_fadvise64
222 64 timer_create sys_timer_create
-223 64 timer_settime sys_timer_settime
-224 64 timer_gettime sys_timer_gettime
-225 64 timer_getoverrun sys_timer_getoverrun
-226 64 timer_delete sys_timer_delete
-227 64 clock_settime sys_clock_settime
-228 64 clock_gettime sys_clock_gettime
-229 64 clock_getres sys_clock_getres
-230 64 clock_nanosleep sys_clock_nanosleep
-231 64 exit_group sys_exit_group
-232 64 epoll_wait sys_epoll_wait
-233 64 epoll_ctl sys_epoll_ctl
-234 64 tgkill sys_tgkill
-235 64 utimes sys_utimes
+223 common timer_settime sys_timer_settime
+224 common timer_gettime sys_timer_gettime
+225 common timer_getoverrun sys_timer_getoverrun
+226 common timer_delete sys_timer_delete
+227 common clock_settime sys_clock_settime
+228 common clock_gettime sys_clock_gettime
+229 common clock_getres sys_clock_getres
+230 common clock_nanosleep sys_clock_nanosleep
+231 common exit_group sys_exit_group
+232 common epoll_wait sys_epoll_wait
+233 common epoll_ctl sys_epoll_ctl
+234 common tgkill sys_tgkill
+235 common utimes sys_utimes
236 64 vserver
-237 64 mbind sys_mbind
-238 64 set_mempolicy sys_set_mempolicy
-239 64 get_mempolicy sys_get_mempolicy
-240 64 mq_open sys_mq_open
-241 64 mq_unlink sys_mq_unlink
-242 64 mq_timedsend sys_mq_timedsend
-243 64 mq_timedreceive sys_mq_timedreceive
+237 common mbind sys_mbind
+238 common set_mempolicy sys_set_mempolicy
+239 common get_mempolicy sys_get_mempolicy
+240 common mq_open sys_mq_open
+241 common mq_unlink sys_mq_unlink
+242 common mq_timedsend sys_mq_timedsend
+243 common mq_timedreceive sys_mq_timedreceive
244 64 mq_notify sys_mq_notify
-245 64 mq_getsetattr sys_mq_getsetattr
+245 common mq_getsetattr sys_mq_getsetattr
246 64 kexec_load sys_kexec_load
247 64 waitid sys_waitid
-248 64 add_key sys_add_key
-249 64 request_key sys_request_key
-250 64 keyctl sys_keyctl
-251 64 ioprio_set sys_ioprio_set
-252 64 ioprio_get sys_ioprio_get
-253 64 inotify_init sys_inotify_init
-254 64 inotify_add_watch sys_inotify_add_watch
-255 64 inotify_rm_watch sys_inotify_rm_watch
-256 64 migrate_pages sys_migrate_pages
-257 64 openat sys_openat
-258 64 mkdirat sys_mkdirat
-259 64 mknodat sys_mknodat
-260 64 fchownat sys_fchownat
-261 64 futimesat sys_futimesat
-262 64 newfstatat sys_newfstatat
-263 64 unlinkat sys_unlinkat
-264 64 renameat sys_renameat
-265 64 linkat sys_linkat
-266 64 symlinkat sys_symlinkat
-267 64 readlinkat sys_readlinkat
-268 64 fchmodat sys_fchmodat
-269 64 faccessat sys_faccessat
-270 64 pselect6 sys_pselect6
-271 64 ppoll sys_ppoll
-272 64 unshare sys_unshare
+248 common add_key sys_add_key
+249 common request_key sys_request_key
+250 common keyctl sys_keyctl
+251 common ioprio_set sys_ioprio_set
+252 common ioprio_get sys_ioprio_get
+253 common inotify_init sys_inotify_init
+254 common inotify_add_watch sys_inotify_add_watch
+255 common inotify_rm_watch sys_inotify_rm_watch
+256 common migrate_pages sys_migrate_pages
+257 common openat sys_openat
+258 common mkdirat sys_mkdirat
+259 common mknodat sys_mknodat
+260 common fchownat sys_fchownat
+261 common futimesat sys_futimesat
+262 common newfstatat sys_newfstatat
+263 common unlinkat sys_unlinkat
+264 common renameat sys_renameat
+265 common linkat sys_linkat
+266 common symlinkat sys_symlinkat
+267 common readlinkat sys_readlinkat
+268 common fchmodat sys_fchmodat
+269 common faccessat sys_faccessat
+270 common pselect6 sys_pselect6
+271 common ppoll sys_ppoll
+272 common unshare sys_unshare
273 64 set_robust_list sys_set_robust_list
274 64 get_robust_list sys_get_robust_list
-275 64 splice sys_splice
-276 64 tee sys_tee
-277 64 sync_file_range sys_sync_file_range
+275 common splice sys_splice
+276 common tee sys_tee
+277 common sync_file_range sys_sync_file_range
278 64 vmsplice sys_vmsplice
279 64 move_pages sys_move_pages
-280 64 utimensat sys_utimensat
-281 64 epoll_pwait sys_epoll_pwait
-282 64 signalfd sys_signalfd
-283 64 timerfd_create sys_timerfd_create
-284 64 eventfd sys_eventfd
-285 64 fallocate sys_fallocate
-286 64 timerfd_settime sys_timerfd_settime
-287 64 timerfd_gettime sys_timerfd_gettime
-288 64 accept4 sys_accept4
-289 64 signalfd4 sys_signalfd4
-290 64 eventfd2 sys_eventfd2
-291 64 epoll_create1 sys_epoll_create1
-292 64 dup3 sys_dup3
-293 64 pipe2 sys_pipe2
-294 64 inotify_init1 sys_inotify_init1
+280 common utimensat sys_utimensat
+281 common epoll_pwait sys_epoll_pwait
+282 common signalfd sys_signalfd
+283 common timerfd_create sys_timerfd_create
+284 common eventfd sys_eventfd
+285 common fallocate sys_fallocate
+286 common timerfd_settime sys_timerfd_settime
+287 common timerfd_gettime sys_timerfd_gettime
+288 common accept4 sys_accept4
+289 common signalfd4 sys_signalfd4
+290 common eventfd2 sys_eventfd2
+291 common epoll_create1 sys_epoll_create1
+292 common dup3 sys_dup3
+293 common pipe2 sys_pipe2
+294 common inotify_init1 sys_inotify_init1
295 64 preadv sys_preadv
296 64 pwritev sys_pwritev
297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
-298 64 perf_event_open sys_perf_event_open
+298 common perf_event_open sys_perf_event_open
299 64 recvmmsg sys_recvmmsg
-300 64 fanotify_init sys_fanotify_init
-301 64 fanotify_mark sys_fanotify_mark
-302 64 prlimit64 sys_prlimit64
-303 64 name_to_handle_at sys_name_to_handle_at
-304 64 open_by_handle_at sys_open_by_handle_at
-305 64 clock_adjtime sys_clock_adjtime
-306 64 syncfs sys_syncfs
+300 common fanotify_init sys_fanotify_init
+301 common fanotify_mark sys_fanotify_mark
+302 common prlimit64 sys_prlimit64
+303 common name_to_handle_at sys_name_to_handle_at
+304 common open_by_handle_at sys_open_by_handle_at
+305 common clock_adjtime sys_clock_adjtime
+306 common syncfs sys_syncfs
307 64 sendmmsg sys_sendmmsg
-308 64 setns sys_setns
-309 64 getcpu sys_getcpu
+308 common setns sys_setns
+309 common getcpu sys_getcpu
310 64 process_vm_readv sys_process_vm_readv
311 64 process_vm_writev sys_process_vm_writev
+#
+# x32-specific system call numbers start at 512 to avoid cache impact
+# for native 64-bit operation.
+#
+512 x32 rt_sigaction sys32_rt_sigaction
+513 x32 rt_sigreturn stub_x32_rt_sigreturn
+514 x32 ioctl compat_sys_ioctl
+515 x32 readv compat_sys_readv
+516 x32 writev compat_sys_writev
+517 x32 recvfrom compat_sys_recvfrom
+518 x32 sendmsg compat_sys_sendmsg
+519 x32 recvmsg compat_sys_recvmsg
+520 x32 execve stub_x32_execve
+521 x32 ptrace compat_sys_ptrace
+522 x32 rt_sigpending sys32_rt_sigpending
+523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait
+524 x32 rt_sigqueueinfo sys32_rt_sigqueueinfo
+525 x32 sigaltstack stub_x32_sigaltstack
+526 x32 timer_create compat_sys_timer_create
+527 x32 mq_notify compat_sys_mq_notify
+528 x32 kexec_load compat_sys_kexec_load
+529 x32 waitid compat_sys_waitid
+530 x32 set_robust_list compat_sys_set_robust_list
+531 x32 get_robust_list compat_sys_get_robust_list
+532 x32 vmsplice compat_sys_vmsplice
+533 x32 move_pages compat_sys_move_pages
+534 x32 preadv compat_sys_preadv64
+535 x32 pwritev compat_sys_pwritev64
+536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+537 x32 recvmmsg compat_sys_recvmmsg
+538 x32 sendmmsg compat_sys_sendmmsg
+539 x32 process_vm_readv compat_sys_process_vm_readv
+540 x32 process_vm_writev compat_sys_process_vm_writev
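
With the table split into "common", "64" and "x32" ABIs, the x32 entry points are selected by setting a marker bit in the syscall number. A minimal sketch from a normal 64-bit program, assuming a kernel built with CONFIG_X86_X32_ABI and the 0x40000000 marker value used by this series:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Mirrors the kernel's marker for x32 system calls (assumed value). */
    #define X32_SYSCALL_BIT 0x40000000UL

    int main(void)
    {
        /* getpid is a "common" entry, reachable from both the 64-bit and
         * the x32 dispatch paths; only the marker bit differs. */
        long pid64  = syscall(SYS_getpid);
        long pidx32 = syscall(X32_SYSCALL_BIT | SYS_getpid);

        printf("64-bit getpid: %ld, x32 getpid: %ld\n", pid64, pidx32);
        return 0;
    }

On a kernel without x32 support the second call simply fails with ENOSYS, which is why the fallback behaviour matters for userspace probing.
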
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index fe626c3ba01b..9924776f4265 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -35,6 +35,9 @@
#define stub_sigaltstack sys_sigaltstack
#define stub_rt_sigreturn sys_rt_sigreturn
+#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
+
#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
#include <asm/syscalls_64.h>
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c
index 5edf4f4bbf53..ce7e3607a870 100644
--- a/arch/x86/um/user-offsets.c
+++ b/arch/x86/um/user-offsets.c
@@ -15,6 +15,8 @@ static char syscalls[] = {
};
#else
#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
static char syscalls[] = {
#include <asm/syscalls_64.h>
};
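
Both files above rely on the same trick: <asm/syscalls_64.h> is a generated list of __SYSCALL_64/__SYSCALL_COMMON/__SYSCALL_X32 invocations, and each includer defines those macros to emit whatever it needs (prototypes, table entries, or nothing). A stripped-down illustration of the pattern, with a made-up three-entry list standing in for the generated header:

    #include <stdio.h>

    /* Stand-in for the generated <asm/syscalls_64.h>. */
    #define SYSCALL_LIST                                         \
        __SYSCALL_COMMON(0, sys_read, sys_read)                  \
        __SYSCALL_64(13, sys_rt_sigaction, compat_rt_sigaction)  \
        __SYSCALL_X32(512, x32_rt_sigaction, x32_rt_sigaction)

    /* This expansion builds a 64-bit name table and skips x32-only entries. */
    #define __SYSCALL_64(nr, sym, compat)     [nr] = #sym,
    #define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
    #define __SYSCALL_X32(nr, sym, compat)    /* not in the 64-bit table */

    static const char *table64[600] = {
        SYSCALL_LIST
    };

    int main(void)
    {
        for (int nr = 0; nr < 600; nr++)
            if (table64[nr])
                printf("%3d -> %s\n", nr, table64[nr]);
        return 0;
    }
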
diff --git a/arch/x86/vdso/.gitignore b/arch/x86/vdso/.gitignore
index 60274d5746e1..3282874bc61d 100644
--- a/arch/x86/vdso/.gitignore
+++ b/arch/x86/vdso/.gitignore
@@ -1,5 +1,7 @@
vdso.lds
vdso-syms.lds
+vdsox32.lds
+vdsox32-syms.lds
vdso32-syms.lds
vdso32-syscall-syms.lds
vdso32-sysenter-syms.lds
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 5d179502a52c..fd14be1d1472 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -3,21 +3,29 @@
#
VDSO64-$(CONFIG_X86_64) := y
+VDSOX32-$(CONFIG_X86_X32_ABI) := y
VDSO32-$(CONFIG_X86_32) := y
VDSO32-$(CONFIG_COMPAT) := y
vdso-install-$(VDSO64-y) += vdso.so
+vdso-install-$(VDSOX32-y) += vdsox32.so
vdso-install-$(VDSO32-y) += $(vdso32-images)
# files to link into the vdso
vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
+vobjs-$(VDSOX32-y) += $(vobjx32s-compat)
+
+# Filter out x32 objects.
+vobj64s := $(filter-out $(vobjx32s-compat),$(vobjs-y))
+
# files to link into kernel
obj-$(VDSO64-y) += vma.o vdso.o
+obj-$(VDSOX32-y) += vdsox32.o
obj-$(VDSO32-y) += vdso32.o vdso32-setup.o
-vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
+vobjs := $(foreach F,$(vobj64s),$(obj)/$F)
$(obj)/vdso.o: $(obj)/vdso.so
@@ -73,6 +81,42 @@ $(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
$(call if_changed,vdsosym)
#
+# X32 processes use the x32 vDSO to access 64-bit kernel data.
+#
+# Build the x32 vDSO image:
+# 1. Compile the x32 vDSO as 64-bit.
+# 2. Convert the object files to x32.
+# 3. Build the x32 vDSO image from the x32 objects, which contain 64-bit
+#    code so that it can reach the 64-bit address space with 64-bit pointers.
+#
+
+targets += vdsox32-syms.lds
+obj-$(VDSOX32-y) += vdsox32-syms.lds
+
+CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
+VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
+ -Wl,-soname=linux-vdso.so.1 \
+ -Wl,-z,max-page-size=4096 \
+ -Wl,-z,common-page-size=4096
+
+vobjx32s-y := $(vobj64s:.o=-x32.o)
+vobjx32s := $(foreach F,$(vobjx32s-y),$(obj)/$F)
+
+# Convert 64bit object file to x32 for x32 vDSO.
+quiet_cmd_x32 = X32 $@
+ cmd_x32 = $(OBJCOPY) -O elf32-x86-64 $< $@
+
+$(obj)/%-x32.o: $(obj)/%.o FORCE
+ $(call if_changed,x32)
+
+targets += vdsox32.so vdsox32.so.dbg vdsox32.lds $(vobjx32s-y)
+
+$(obj)/vdsox32.o: $(src)/vdsox32.S $(obj)/vdsox32.so
+
+$(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
+ $(call if_changed,vdso)
+
+#
# Build multiple 32-bit vDSO images to choose from at boot time.
#
obj-$(VDSO32-y) += vdso32-syms.lds
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 6bc0e723b6e8..885eff49d6ab 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -70,100 +70,98 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
return ret;
}
+notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
+{
+ long ret;
+
+ asm("syscall" : "=a" (ret) :
+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+ return ret;
+}
+
+
notrace static inline long vgetns(void)
{
long v;
cycles_t cycles;
if (gtod->clock.vclock_mode == VCLOCK_TSC)
cycles = vread_tsc();
- else
+ else if (gtod->clock.vclock_mode == VCLOCK_HPET)
cycles = vread_hpet();
+ else
+ return 0;
v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
return (v * gtod->clock.mult) >> gtod->clock.shift;
}
-notrace static noinline int do_realtime(struct timespec *ts)
+/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
+notrace static int __always_inline do_realtime(struct timespec *ts)
{
unsigned long seq, ns;
+ int mode;
+
do {
- seq = read_seqbegin(&gtod->lock);
+ seq = read_seqcount_begin(&gtod->seq);
+ mode = gtod->clock.vclock_mode;
ts->tv_sec = gtod->wall_time_sec;
ts->tv_nsec = gtod->wall_time_nsec;
ns = vgetns();
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+
timespec_add_ns(ts, ns);
- return 0;
+ return mode;
}
-notrace static noinline int do_monotonic(struct timespec *ts)
+notrace static int do_monotonic(struct timespec *ts)
{
- unsigned long seq, ns, secs;
+ unsigned long seq, ns;
+ int mode;
+
do {
- seq = read_seqbegin(&gtod->lock);
- secs = gtod->wall_time_sec;
- ns = gtod->wall_time_nsec + vgetns();
- secs += gtod->wall_to_monotonic.tv_sec;
- ns += gtod->wall_to_monotonic.tv_nsec;
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
-
- /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
- * are all guaranteed to be nonnegative.
- */
- while (ns >= NSEC_PER_SEC) {
- ns -= NSEC_PER_SEC;
- ++secs;
- }
- ts->tv_sec = secs;
- ts->tv_nsec = ns;
+ seq = read_seqcount_begin(&gtod->seq);
+ mode = gtod->clock.vclock_mode;
+ ts->tv_sec = gtod->monotonic_time_sec;
+ ts->tv_nsec = gtod->monotonic_time_nsec;
+ ns = vgetns();
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+ timespec_add_ns(ts, ns);
- return 0;
+ return mode;
}
-notrace static noinline int do_realtime_coarse(struct timespec *ts)
+notrace static int do_realtime_coarse(struct timespec *ts)
{
unsigned long seq;
do {
- seq = read_seqbegin(&gtod->lock);
+ seq = read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->wall_time_coarse.tv_sec;
ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
return 0;
}
-notrace static noinline int do_monotonic_coarse(struct timespec *ts)
+notrace static int do_monotonic_coarse(struct timespec *ts)
{
- unsigned long seq, ns, secs;
+ unsigned long seq;
do {
- seq = read_seqbegin(&gtod->lock);
- secs = gtod->wall_time_coarse.tv_sec;
- ns = gtod->wall_time_coarse.tv_nsec;
- secs += gtod->wall_to_monotonic.tv_sec;
- ns += gtod->wall_to_monotonic.tv_nsec;
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
-
- /* wall_time_nsec and wall_to_monotonic.tv_nsec are
- * guaranteed to be between 0 and NSEC_PER_SEC.
- */
- if (ns >= NSEC_PER_SEC) {
- ns -= NSEC_PER_SEC;
- ++secs;
- }
- ts->tv_sec = secs;
- ts->tv_nsec = ns;
+ seq = read_seqcount_begin(&gtod->seq);
+ ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
+ ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
return 0;
}
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
+ int ret = VCLOCK_NONE;
+
switch (clock) {
case CLOCK_REALTIME:
- if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
- return do_realtime(ts);
+ ret = do_realtime(ts);
break;
case CLOCK_MONOTONIC:
- if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
- return do_monotonic(ts);
+ ret = do_monotonic(ts);
break;
case CLOCK_REALTIME_COARSE:
return do_realtime_coarse(ts);
@@ -171,32 +169,33 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
return do_monotonic_coarse(ts);
}
- return vdso_fallback_gettime(clock, ts);
+ if (ret == VCLOCK_NONE)
+ return vdso_fallback_gettime(clock, ts);
+ return 0;
}
int clock_gettime(clockid_t, struct timespec *)
__attribute__((weak, alias("__vdso_clock_gettime")));
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
- long ret;
- if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) {
- if (likely(tv != NULL)) {
- BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
- offsetof(struct timespec, tv_nsec) ||
- sizeof(*tv) != sizeof(struct timespec));
- do_realtime((struct timespec *)tv);
- tv->tv_usec /= 1000;
- }
- if (unlikely(tz != NULL)) {
- /* Avoid memcpy. Some old compilers fail to inline it */
- tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
- tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
- }
- return 0;
+ long ret = VCLOCK_NONE;
+
+ if (likely(tv != NULL)) {
+ BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
+ offsetof(struct timespec, tv_nsec) ||
+ sizeof(*tv) != sizeof(struct timespec));
+ ret = do_realtime((struct timespec *)tv);
+ tv->tv_usec /= 1000;
}
- asm("syscall" : "=a" (ret) :
- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
- return ret;
+ if (unlikely(tz != NULL)) {
+ /* Avoid memcpy. Some old compilers fail to inline it */
+ tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
+ tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
+ }
+
+ if (ret == VCLOCK_NONE)
+ return vdso_fallback_gtod(tv, tz);
+ return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
__attribute__((weak, alias("__vdso_gettimeofday")));
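
The rewritten fast paths now return the vclock mode and fall back to a real system call only when it is VCLOCK_NONE, so ordinary clock_gettime()/gettimeofday() callers are served either way. A trivial caller exercising the paths touched above (link with -lrt on older glibc):

    #include <stdio.h>
    #include <sys/time.h>
    #include <time.h>

    int main(void)
    {
        struct timespec mono, coarse;
        struct timeval tv;

        clock_gettime(CLOCK_MONOTONIC, &mono);          /* do_monotonic()       */
        clock_gettime(CLOCK_REALTIME_COARSE, &coarse);  /* do_realtime_coarse() */
        gettimeofday(&tv, NULL);                        /* __vdso_gettimeofday()*/

        printf("monotonic:         %ld.%09ld\n", (long)mono.tv_sec, mono.tv_nsec);
        printf("realtime (coarse): %ld.%09ld\n", (long)coarse.tv_sec, coarse.tv_nsec);
        printf("gettimeofday:      %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
    }

The comments indicate which vDSO helper handles each call when the vDSO is in use; with VCLOCK_NONE the same calls go through the syscall fallbacks added above.
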
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index a944020fa859..66e6d9359826 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -311,6 +311,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int ret = 0;
bool compat;
+#ifdef CONFIG_X86_X32_ABI
+ if (test_thread_flag(TIF_X32))
+ return x32_setup_additional_pages(bprm, uses_interp);
+#endif
+
if (vdso_enabled == VDSO_DISABLED)
return 0;
diff --git a/arch/x86/vdso/vdsox32.S b/arch/x86/vdso/vdsox32.S
new file mode 100644
index 000000000000..d6b9a7f42a8a
--- /dev/null
+++ b/arch/x86/vdso/vdsox32.S
@@ -0,0 +1,22 @@
+#include <asm/page_types.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+__PAGE_ALIGNED_DATA
+
+ .globl vdsox32_start, vdsox32_end
+ .align PAGE_SIZE
+vdsox32_start:
+ .incbin "arch/x86/vdso/vdsox32.so"
+vdsox32_end:
+ .align PAGE_SIZE /* extra data here leaks to userspace. */
+
+.previous
+
+ .globl vdsox32_pages
+ .bss
+ .align 8
+ .type vdsox32_pages, @object
+vdsox32_pages:
+ .zero (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE * 8
+ .size vdsox32_pages, .-vdsox32_pages
diff --git a/arch/x86/vdso/vdsox32.lds.S b/arch/x86/vdso/vdsox32.lds.S
new file mode 100644
index 000000000000..62272aa2ae0a
--- /dev/null
+++ b/arch/x86/vdso/vdsox32.lds.S
@@ -0,0 +1,28 @@
+/*
+ * Linker script for x32 vDSO.
+ * We #include the file to define the layout details.
+ * Here we only choose the prelinked virtual address.
+ *
+ * This file defines the version script giving the user-exported symbols in
+ * the DSO. We can define local symbols here called VDSO* to make their
+ * values visible using the asm-x86/vdso.h macros from the kernel proper.
+ */
+
+#define VDSO_PRELINK 0
+#include "vdso-layout.lds.S"
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION {
+ LINUX_2.6 {
+ global:
+ __vdso_clock_gettime;
+ __vdso_gettimeofday;
+ __vdso_getcpu;
+ __vdso_time;
+ local: *;
+ };
+}
+
+VDSOX32_PRELINK = VDSO_PRELINK;
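
The version script above is what makes __vdso_clock_gettime and friends visible to userspace; the image itself is mapped by the kernel and announced through the auxiliary vector rather than loaded by the dynamic linker. A small sketch that locates the mapped image for a 64-bit process (getauxval() needs a reasonably recent glibc; an x32 process gets the 32-bit-format image built here instead):

    #include <elf.h>
    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
        /* Base address of the vDSO image the kernel mapped for this process. */
        const Elf64_Ehdr *vdso = (const Elf64_Ehdr *)getauxval(AT_SYSINFO_EHDR);

        if (!vdso) {
            fprintf(stderr, "no vDSO mapped\n");
            return 1;
        }
        printf("vDSO mapped at %p, %u program headers\n",
               (const void *)vdso, (unsigned)vdso->e_phnum);
        return 0;
    }
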
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 17e18279649f..00aaf047b39f 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -24,7 +24,44 @@ extern unsigned short vdso_sync_cpuid;
extern struct page *vdso_pages[];
static unsigned vdso_size;
-static void __init patch_vdso(void *vdso, size_t len)
+#ifdef CONFIG_X86_X32_ABI
+extern char vdsox32_start[], vdsox32_end[];
+extern struct page *vdsox32_pages[];
+static unsigned vdsox32_size;
+
+static void __init patch_vdsox32(void *vdso, size_t len)
+{
+ Elf32_Ehdr *hdr = vdso;
+ Elf32_Shdr *sechdrs, *alt_sec = 0;
+ char *secstrings;
+ void *alt_data;
+ int i;
+
+ BUG_ON(len < sizeof(Elf32_Ehdr));
+ BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
+
+ sechdrs = (void *)hdr + hdr->e_shoff;
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ Elf32_Shdr *shdr = &sechdrs[i];
+ if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
+ alt_sec = shdr;
+ goto found;
+ }
+ }
+
+ /* If we get here, it's probably a bug. */
+ pr_warning("patch_vdsox32: .altinstructions not found\n");
+ return; /* nothing to patch */
+
+found:
+ alt_data = (void *)hdr + alt_sec->sh_offset;
+ apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
+}
+#endif
+
+static void __init patch_vdso64(void *vdso, size_t len)
{
Elf64_Ehdr *hdr = vdso;
Elf64_Shdr *sechdrs, *alt_sec = 0;
@@ -47,7 +84,7 @@ static void __init patch_vdso(void *vdso, size_t len)
}
/* If we get here, it's probably a bug. */
- pr_warning("patch_vdso: .altinstructions not found\n");
+ pr_warning("patch_vdso64: .altinstructions not found\n");
return; /* nothing to patch */
found:
@@ -60,12 +97,20 @@ static int __init init_vdso(void)
int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
int i;
- patch_vdso(vdso_start, vdso_end - vdso_start);
+ patch_vdso64(vdso_start, vdso_end - vdso_start);
vdso_size = npages << PAGE_SHIFT;
for (i = 0; i < npages; i++)
vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
+#ifdef CONFIG_X86_X32_ABI
+ patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start);
+ npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
+ vdsox32_size = npages << PAGE_SHIFT;
+ for (i = 0; i < npages; i++)
+ vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
+#endif
+
return 0;
}
subsys_initcall(init_vdso);
@@ -103,7 +148,10 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
/* Setup a VMA at program startup for the vsyscall page.
Not called for compat tasks */
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+static int setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp,
+ struct page **pages,
+ unsigned size)
{
struct mm_struct *mm = current->mm;
unsigned long addr;
@@ -113,8 +161,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
return 0;
down_write(&mm->mmap_sem);
- addr = vdso_addr(mm->start_stack, vdso_size);
- addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
+ addr = vdso_addr(mm->start_stack, size);
+ addr = get_unmapped_area(NULL, addr, size, 0, 0);
if (IS_ERR_VALUE(addr)) {
ret = addr;
goto up_fail;
@@ -122,10 +170,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
current->mm->context.vdso = (void *)addr;
- ret = install_special_mapping(mm, addr, vdso_size,
+ ret = install_special_mapping(mm, addr, size,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- vdso_pages);
+ pages);
if (ret) {
current->mm->context.vdso = NULL;
goto up_fail;
@@ -136,6 +184,20 @@ up_fail:
return ret;
}
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ return setup_additional_pages(bprm, uses_interp, vdso_pages,
+ vdso_size);
+}
+
+#ifdef CONFIG_X86_X32_ABI
+int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
+ vdsox32_size);
+}
+#endif
+
static __init int vdso_setup(char *s)
{
vdso_enabled = simple_strtoul(s, NULL, 0);
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index f932b30b47fb..ddab37b24741 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -113,7 +113,7 @@ CONFIG_DEFAULT_IOSCHED="noop"
# CONFIG_INLINE_SPIN_LOCK_BH is not set
# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
-CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_UNINLINE_SPIN_UNLOCK is not set
# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
diff --git a/arch/xtensa/include/asm/posix_types.h b/arch/xtensa/include/asm/posix_types.h
index 6b2190c35882..6e96be0d02d3 100644
--- a/arch/xtensa/include/asm/posix_types.h
+++ b/arch/xtensa/include/asm/posix_types.h
@@ -19,104 +19,21 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
-typedef unsigned long __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
typedef unsigned short __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
+#define __kernel_size_t __kernel_size_t
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
-typedef unsigned short __kernel_old_dev_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#ifndef __GNUC__
-
-#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
-#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
-#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
-#define __FD_ZERO(set) \
- ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set)))
-
-#else /* __GNUC__ */
+#define __kernel_old_uid_t __kernel_old_uid_t
-#if defined(__KERNEL__)
-/* With GNU C, use inline functions instead so args are evaluated only once: */
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
-{
- unsigned int *tmp = (unsigned int *)p->fds_bits;
- int i;
+typedef unsigned short __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 8:
- tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
- tmp[4] = 0; tmp[5] = 0; tmp[6] = 0; tmp[7] = 0;
- return;
- }
- }
- i = __FDSET_LONGS;
- while (i) {
- i--;
- *tmp = 0;
- tmp++;
- }
-}
+#include <asm-generic/posix_types.h>
-#endif /* defined(__KERNEL__) */
-#endif /* __GNUC__ */
#endif /* _XTENSA_POSIX_TYPES_H */
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7556913aba45..47768ff87343 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -384,6 +384,15 @@ config ACPI_CUSTOM_METHOD
load additional kernel modules after boot, this feature may be used
to override that restriction).
+config ACPI_BGRT
+ tristate "Boottime Graphics Resource Table support"
+ default n
+ help
+ This driver adds support for exposing the ACPI Boottime Graphics
+ Resource Table, which allows the operating system to obtain
+ data from the firmware boot splash. It will appear under
+ /sys/firmware/acpi/bgrt/ .
+
source "drivers/acpi/apei/Kconfig"
endif # ACPI
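
Per the help text, the new driver exposes the firmware's boot-splash data under /sys/firmware/acpi/bgrt/. A quick way to see what a given firmware provides is simply to enumerate that directory (the individual attribute names are left to the driver, so none are assumed here):

    #include <dirent.h>
    #include <stdio.h>

    int main(void)
    {
        DIR *dir = opendir("/sys/firmware/acpi/bgrt");
        struct dirent *de;

        if (!dir) {
            perror("/sys/firmware/acpi/bgrt");
            return 1;
        }
        while ((de = readdir(dir)) != NULL)
            if (de->d_name[0] != '.')
                printf("%s\n", de->d_name);     /* BGRT attributes and image */
        closedir(dir);
        return 0;
    }
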
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 1567028d2038..47199e2a9130 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_HED) += hed.o
obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
+obj-$(CONFIG_ACPI_BGRT) += bgrt.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 0ca208b6dcf0..793b8cc8e256 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -68,12 +68,14 @@ acpi-y += \
acpi-y += \
hwacpi.o \
+ hwesleep.o \
hwgpe.o \
hwpci.o \
hwregs.o \
hwsleep.o \
hwvalid.o \
- hwxface.o
+ hwxface.o \
+ hwxfsleep.o
acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index a44bd424f9f4..8a7d51bfb3b3 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -51,7 +51,6 @@
*
* Note: The order of these include files is important.
*/
-#include "acconfig.h" /* Global configuration constants */
#include "acmacros.h" /* C macros */
#include "aclocal.h" /* Internal data types */
#include "acobject.h" /* ACPI internal object */
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index deaa81979561..5e8abb07724f 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -111,7 +111,7 @@ acpi_status acpi_db_find_name_in_namespace(char *name_arg);
void acpi_db_set_scope(char *name);
-acpi_status acpi_db_sleep(char *object_arg);
+ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_db_sleep(char *object_arg))
void acpi_db_find_references(char *object_arg);
@@ -119,11 +119,13 @@ void acpi_db_display_locks(void);
void acpi_db_display_resources(char *object_arg);
-void acpi_db_display_gpes(void);
+ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void))
void acpi_db_check_integrity(void);
-void acpi_db_generate_gpe(char *gpe_arg, char *block_arg);
+ACPI_HW_DEPENDENT_RETURN_VOID(void
+ acpi_db_generate_gpe(char *gpe_arg,
+ char *block_arg))
void acpi_db_check_predefined_names(void);
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index c53caa521a30..d700f63e4701 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -69,11 +69,10 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
*/
acpi_status acpi_ev_init_global_lock_handler(void);
-acpi_status acpi_ev_acquire_global_lock(u16 timeout);
-
-acpi_status acpi_ev_release_global_lock(void);
-
-acpi_status acpi_ev_remove_global_lock_handler(void);
+ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
+ acpi_ev_acquire_global_lock(u16 timeout))
+ ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
+ acpi_status acpi_ev_remove_global_lock_handler(void);
/*
* evgpe - Low-level GPE support
@@ -114,7 +113,9 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
void *context);
-acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
+ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
+ acpi_ev_delete_gpe_block(struct acpi_gpe_block_info
+ *gpe_block))
u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
@@ -126,9 +127,10 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
*/
acpi_status acpi_ev_gpe_initialize(void);
-void acpi_ev_update_gpes(acpi_owner_id table_owner_id);
+ACPI_HW_DEPENDENT_RETURN_VOID(void
+ acpi_ev_update_gpes(acpi_owner_id table_owner_id))
-acpi_status
+ acpi_status
acpi_ev_match_gpe_method(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@@ -237,6 +239,5 @@ acpi_status acpi_ev_remove_sci_handler(void);
u32 acpi_ev_initialize_sCI(u32 program_sCI);
-void acpi_ev_terminate(void);
-
+ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
#endif /* __ACEVENTS_H__ */
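
The ACPI_HW_DEPENDENT_RETURN_OK/VOID wrappers used in these headers are defined elsewhere in the series (in the public ACPICA headers); here is a sketch of the intended expansion, so the prototype changes above read naturally:

/* Sketch only -- not copied from this patch. On a full-hardware build the
 * macro emits the prototype unchanged; on a reduced-hardware build it emits
 * an inline stub so callers need no #ifdef of their own. ACPI_INLINE is the
 * ACPICA portability macro for static inline. */
#if (!ACPI_REDUCED_HARDWARE)
#define ACPI_HW_DEPENDENT_RETURN_OK(prototype)   prototype;
#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) prototype;
#else
#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
	static ACPI_INLINE prototype {return (AE_OK);}
#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
	static ACPI_INLINE prototype {return;}
#endif
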
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 2853f7673f3b..4f7d3f57d05c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -147,7 +147,7 @@ u8 acpi_gbl_system_awake_and_running;
*/
u8 acpi_gbl_reduced_hardware;
-#endif
+#endif /* DEFINE_ACPI_GLOBALS */
/* Do not disassemble buffers to resource descriptors */
@@ -184,8 +184,12 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
* found in the RSDT/XSDT.
*/
ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
+
+#if (!ACPI_REDUCED_HARDWARE)
ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
+#endif /* !ACPI_REDUCED_HARDWARE */
+
/* These addresses are calculated from the FADT Event Block addresses */
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_status;
@@ -397,10 +401,15 @@ ACPI_EXTERN struct acpi_fixed_event_handler
ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
ACPI_EXTERN struct acpi_gpe_block_info
*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
+
+#if (!ACPI_REDUCED_HARDWARE)
+
ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler;
ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
+#endif /* !ACPI_REDUCED_HARDWARE */
+
/*****************************************************************************
*
* Debugger globals
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 677793e938f5..5ccb99ae3a6f 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -81,6 +81,26 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value);
acpi_status acpi_hw_clear_acpi_status(void);
/*
+ * hwsleep - sleep/wake support (Legacy sleep registers)
+ */
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags);
+
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags);
+
+acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags);
+
+/*
+ * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers)
+ */
+void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument);
+
+acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags);
+
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags);
+
+acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags);
+
+/*
* hwvalid - Port I/O with validation
*/
acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width);
@@ -128,16 +148,4 @@ acpi_status
acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id,
acpi_handle root_pci_device, acpi_handle pci_region);
-#ifdef ACPI_FUTURE_USAGE
-/*
- * hwtimer - ACPI Timer prototypes
- */
-acpi_status acpi_get_timer_resolution(u32 * resolution);
-
-acpi_status acpi_get_timer(u32 * ticks);
-
-acpi_status
-acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed);
-#endif /* ACPI_FUTURE_USAGE */
-
#endif /* __ACHWARE_H__ */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 3f24068837d5..e3922ca20e7f 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -370,6 +370,7 @@ struct acpi_predefined_data {
/* Defines for Flags field above */
#define ACPI_OBJECT_REPAIRED 1
+#define ACPI_OBJECT_WRAPPED 2
/*
* Bitmapped return value types
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index ef338a96f5b2..f119f473f71a 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -516,6 +516,12 @@
#endif /* ACPI_DEBUG_OUTPUT */
+#if (!ACPI_REDUCED_HARDWARE)
+#define ACPI_HW_OPTIONAL_FUNCTION(addr) addr
+#else
+#define ACPI_HW_OPTIONAL_FUNCTION(addr) NULL
+#endif
+
/*
* Some code only gets executed when the debugger is built in.
* Note that this is entirely independent of whether the
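
ACPI_HW_OPTIONAL_FUNCTION exists so that legacy-hardware entry points can sit in a dispatch table and simply compile to NULL on a reduced-hardware build; hwxfsleep.c (new file further down) uses it exactly this way. A hedged, self-contained illustration:

/* Illustration only; sleep_pair and do_sleep are hypothetical names.
 * The real dispatch table is acpi_sleep_dispatch[] in hwxfsleep.c. */
typedef acpi_status (*acpi_sleep_fn)(u8 sleep_state, u8 flags);

struct sleep_pair {
	acpi_sleep_fn legacy_function;	 /* NULL under ACPI_REDUCED_HARDWARE */
	acpi_sleep_fn extended_function; /* FADT V5 sleep registers */
};

static struct sleep_pair sleep_entry = {
	ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
	acpi_hw_extended_sleep
};

static acpi_status do_sleep(u8 sleep_state, u8 flags)
{
	/* Take the legacy path only when it was compiled in */
	if (sleep_entry.legacy_function && !acpi_gbl_reduced_hardware)
		return sleep_entry.legacy_function(sleep_state, flags);

	return sleep_entry.extended_function(sleep_state, flags);
}
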
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 2c9e0f049523..9b19d4b86424 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -283,8 +283,9 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr);
acpi_status
-acpi_ns_repair_package_list(struct acpi_predefined_data *data,
- union acpi_operand_object **obj_desc_ptr);
+acpi_ns_wrap_with_package(struct acpi_predefined_data *data,
+ union acpi_operand_object *original_object,
+ union acpi_operand_object **obj_desc_ptr);
acpi_status
acpi_ns_repair_null_element(struct acpi_predefined_data *data,
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index d5bec304c823..6712965ba8ae 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -67,6 +67,11 @@ acpi_status acpi_tb_resize_root_table_list(void);
acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc);
+struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
+ *table_header,
+ struct acpi_table_desc
+ *table_desc);
+
acpi_status
acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index);
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 6729ebe2f1e6..07e4dc44f81c 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -47,7 +47,7 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evevent")
-
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/* Local prototypes */
static acpi_status acpi_ev_fixed_event_initialize(void);
@@ -291,3 +291,5 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
return ((acpi_gbl_fixed_event_handlers[event].
handler) (acpi_gbl_fixed_event_handlers[event].context));
}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 5e5683cb1f0d..cfeab38795d8 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -48,7 +48,7 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evglock")
-
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/* Local prototypes */
static u32 acpi_ev_global_lock_handler(void *context);
@@ -339,3 +339,5 @@ acpi_status acpi_ev_release_global_lock(void)
acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
return_ACPI_STATUS(status);
}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 9e88cb6fb25e..8ba0e5f17091 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -48,7 +48,7 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")
-
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
@@ -766,3 +766,5 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
return_UINT32(ACPI_INTERRUPT_HANDLED);
}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index be75339cd5dd..23a3ca86b2eb 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -48,7 +48,7 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evgpeblk")
-
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/* Local prototypes */
static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
@@ -504,3 +504,5 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
return_ACPI_STATUS(AE_OK);
}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index adf7494da9db..da0add858f81 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -48,7 +48,7 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evgpeinit")
-
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/*
* Note: History of _PRW support in ACPICA
*
@@ -440,3 +440,5 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
name, gpe_number));
return_ACPI_STATUS(AE_OK);
}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 25073932aa10..3c43796b8361 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -48,6 +48,7 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evgpeutil")
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/*******************************************************************************
*
* FUNCTION: acpi_ev_walk_gpe_list
@@ -374,3 +375,5 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
return_ACPI_STATUS(AE_OK);
}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 84966f416463..51ef9f5e002d 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -108,27 +108,30 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
ACPI_FUNCTION_NAME(ev_queue_notify_request);
/*
- * For value 3 (Ejection Request), some device method may need to be run.
- * For value 2 (Device Wake) if _PRW exists, the _PS0 method may need
- * to be run.
+ * For value 0x03 (Ejection Request), may need to run a device method.
+ * For value 0x02 (Device Wake), if _PRW exists, may need to run
+ * the _PS0 method.
* For value 0x80 (Status Change) on the power button or sleep button,
- * initiate soft-off or sleep operation?
+ * initiate soft-off or sleep operation.
+ *
+ * For all cases, simply dispatch the notify to the handler.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Dispatching Notify on [%4.4s] Node %p Value 0x%2.2X (%s)\n",
- acpi_ut_get_node_name(node), node, notify_value,
- acpi_ut_get_notify_name(notify_value)));
+ "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n",
+ acpi_ut_get_node_name(node),
+ acpi_ut_get_type_name(node->type), notify_value,
+ acpi_ut_get_notify_name(notify_value), node));
/* Get the notify object attached to the NS Node */
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
- /* We have the notify object, Get the right handler */
+ /* We have the notify object, Get the correct handler */
switch (node->type) {
- /* Notify allowed only on these types */
+ /* Notify is allowed only on these types */
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_THERMAL:
@@ -152,7 +155,7 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
}
/*
- * If there is any handler to run, schedule the dispatcher.
+ * If there is a handler to run, schedule the dispatcher.
* Check for:
* 1) Global system notify handler
* 2) Global device notify handler
@@ -270,6 +273,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
acpi_ut_delete_generic_state(notify_info);
}
+#if (!ACPI_REDUCED_HARDWARE)
/******************************************************************************
*
* FUNCTION: acpi_ev_terminate
@@ -338,3 +342,5 @@ void acpi_ev_terminate(void)
}
return_VOID;
}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
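
As a usage note for the reworded comment above: these notify values normally reach a handler registered through the external interface. A hedged driver-side sketch (the device handle and the work to be queued are hypothetical):

/* Hedged usage sketch; not part of the patch. */
static void example_notify_handler(acpi_handle handle, u32 event, void *data)
{
	switch (event) {
	case 0x02:		/* Device Wake */
	case 0x03:		/* Ejection Request */
		/* queue device-specific work here */
		break;
	case 0x80:		/* Status Change */
	default:
		break;
	}
}

static acpi_status example_install(acpi_handle device)
{
	return acpi_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
					   example_notify_handler, NULL);
}
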
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 26065c612e76..6a57aa2d70d1 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -48,7 +48,7 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evsci")
-
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/* Local prototypes */
static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
@@ -181,3 +181,5 @@ acpi_status acpi_ev_remove_sci_handler(void)
return_ACPI_STATUS(status);
}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 61944e89565a..44bef5744ebb 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -51,222 +51,6 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evxface")
-/*******************************************************************************
- *
- * FUNCTION: acpi_install_exception_handler
- *
- * PARAMETERS: Handler - Pointer to the handler function for the
- * event
- *
- * RETURN: Status
- *
- * DESCRIPTION: Saves the pointer to the handler function
- *
- ******************************************************************************/
-#ifdef ACPI_FUTURE_USAGE
-acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Don't allow two handlers. */
-
- if (acpi_gbl_exception_handler) {
- status = AE_ALREADY_EXISTS;
- goto cleanup;
- }
-
- /* Install the handler */
-
- acpi_gbl_exception_handler = handler;
-
- cleanup:
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
-#endif /* ACPI_FUTURE_USAGE */
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_install_global_event_handler
- *
- * PARAMETERS: Handler - Pointer to the global event handler function
- * Context - Value passed to the handler on each event
- *
- * RETURN: Status
- *
- * DESCRIPTION: Saves the pointer to the handler function. The global handler
- * is invoked upon each incoming GPE and Fixed Event. It is
- * invoked at interrupt level at the time of the event dispatch.
- * Can be used to update event counters, etc.
- *
- ******************************************************************************/
-acpi_status
-acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
-
- /* Parameter validation */
-
- if (!handler) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Don't allow two handlers. */
-
- if (acpi_gbl_global_event_handler) {
- status = AE_ALREADY_EXISTS;
- goto cleanup;
- }
-
- acpi_gbl_global_event_handler = handler;
- acpi_gbl_global_event_handler_context = context;
-
- cleanup:
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_install_fixed_event_handler
- *
- * PARAMETERS: Event - Event type to enable.
- * Handler - Pointer to the handler function for the
- * event
- * Context - Value passed to the handler on each GPE
- *
- * RETURN: Status
- *
- * DESCRIPTION: Saves the pointer to the handler function and then enables the
- * event.
- *
- ******************************************************************************/
-acpi_status
-acpi_install_fixed_event_handler(u32 event,
- acpi_event_handler handler, void *context)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
-
- /* Parameter validation */
-
- if (event > ACPI_EVENT_MAX) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Don't allow two handlers. */
-
- if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
- status = AE_ALREADY_EXISTS;
- goto cleanup;
- }
-
- /* Install the handler before enabling the event */
-
- acpi_gbl_fixed_event_handlers[event].handler = handler;
- acpi_gbl_fixed_event_handlers[event].context = context;
-
- status = acpi_clear_event(event);
- if (ACPI_SUCCESS(status))
- status = acpi_enable_event(event, 0);
- if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
- event));
-
- /* Remove the handler */
-
- acpi_gbl_fixed_event_handlers[event].handler = NULL;
- acpi_gbl_fixed_event_handlers[event].context = NULL;
- } else {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Enabled fixed event %X, Handler=%p\n", event,
- handler));
- }
-
- cleanup:
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_remove_fixed_event_handler
- *
- * PARAMETERS: Event - Event type to disable.
- * Handler - Address of the handler
- *
- * RETURN: Status
- *
- * DESCRIPTION: Disables the event and unregisters the event handler.
- *
- ******************************************************************************/
-acpi_status
-acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
-{
- acpi_status status = AE_OK;
-
- ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
-
- /* Parameter validation */
-
- if (event > ACPI_EVENT_MAX) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Disable the event before removing the handler */
-
- status = acpi_disable_event(event, 0);
-
- /* Always Remove the handler */
-
- acpi_gbl_fixed_event_handlers[event].handler = NULL;
- acpi_gbl_fixed_event_handlers[event].context = NULL;
-
- if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO,
- "Could not write to fixed event enable register 0x%X",
- event));
- } else {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
- event));
- }
-
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
/*******************************************************************************
*
@@ -334,6 +118,7 @@ acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj,
return AE_OK;
}
+
/*******************************************************************************
*
* FUNCTION: acpi_install_notify_handler
@@ -705,6 +490,224 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
/*******************************************************************************
*
+ * FUNCTION: acpi_install_exception_handler
+ *
+ * PARAMETERS: Handler - Pointer to the handler function for the
+ * event
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function
+ *
+ ******************************************************************************/
+#ifdef ACPI_FUTURE_USAGE
+acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Don't allow two handlers. */
+
+ if (acpi_gbl_exception_handler) {
+ status = AE_ALREADY_EXISTS;
+ goto cleanup;
+ }
+
+ /* Install the handler */
+
+ acpi_gbl_exception_handler = handler;
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
+#endif /* ACPI_FUTURE_USAGE */
+
+#if (!ACPI_REDUCED_HARDWARE)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_global_event_handler
+ *
+ * PARAMETERS: Handler - Pointer to the global event handler function
+ * Context - Value passed to the handler on each event
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function. The global handler
+ * is invoked upon each incoming GPE and Fixed Event. It is
+ * invoked at interrupt level at the time of the event dispatch.
+ * Can be used to update event counters, etc.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
+
+ /* Parameter validation */
+
+ if (!handler) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Don't allow two handlers. */
+
+ if (acpi_gbl_global_event_handler) {
+ status = AE_ALREADY_EXISTS;
+ goto cleanup;
+ }
+
+ acpi_gbl_global_event_handler = handler;
+ acpi_gbl_global_event_handler_context = context;
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_fixed_event_handler
+ *
+ * PARAMETERS: Event - Event type to enable.
+ * Handler - Pointer to the handler function for the
+ * event
+ * Context - Value passed to the handler on each GPE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function and then enables the
+ * event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_fixed_event_handler(u32 event,
+ acpi_event_handler handler, void *context)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
+
+ /* Parameter validation */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Don't allow two handlers. */
+
+ if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
+ status = AE_ALREADY_EXISTS;
+ goto cleanup;
+ }
+
+ /* Install the handler before enabling the event */
+
+ acpi_gbl_fixed_event_handlers[event].handler = handler;
+ acpi_gbl_fixed_event_handlers[event].context = context;
+
+ status = acpi_clear_event(event);
+ if (ACPI_SUCCESS(status))
+ status = acpi_enable_event(event, 0);
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
+ event));
+
+ /* Remove the handler */
+
+ acpi_gbl_fixed_event_handlers[event].handler = NULL;
+ acpi_gbl_fixed_event_handlers[event].context = NULL;
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Enabled fixed event %X, Handler=%p\n", event,
+ handler));
+ }
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_fixed_event_handler
+ *
+ * PARAMETERS: Event - Event type to disable.
+ * Handler - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disables the event and unregisters the event handler.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
+
+ /* Parameter validation */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Disable the event before removing the handler */
+
+ status = acpi_disable_event(event, 0);
+
+ /* Always Remove the handler */
+
+ acpi_gbl_fixed_event_handlers[event].handler = NULL;
+ acpi_gbl_fixed_event_handlers[event].context = NULL;
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO,
+ "Could not write to fixed event enable register 0x%X",
+ event));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
+ event));
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_install_gpe_handler
*
* PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
@@ -984,3 +987,4 @@ acpi_status acpi_release_global_lock(u32 handle)
}
ACPI_EXPORT_SYMBOL(acpi_release_global_lock)
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 1768bbec1002..77cee5a5e891 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -49,6 +49,7 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evxfevnt")
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/*******************************************************************************
*
* FUNCTION: acpi_enable
@@ -352,3 +353,4 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
}
ACPI_EXPORT_SYMBOL(acpi_get_event_status)
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 33388fd69df4..86f9b343ebd4 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -50,6 +50,7 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evxfgpe")
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/******************************************************************************
*
* FUNCTION: acpi_update_all_gpes
@@ -695,3 +696,4 @@ acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
}
ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index d21ec5f0b3a9..d0b9ed5df97e 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -48,6 +48,7 @@
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwacpi")
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/******************************************************************************
*
* FUNCTION: acpi_hw_set_mode
@@ -166,3 +167,5 @@ u32 acpi_hw_get_mode(void)
return_UINT32(ACPI_SYS_MODE_LEGACY);
}
}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
new file mode 100644
index 000000000000..29e859293edd
--- /dev/null
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -0,0 +1,247 @@
+/******************************************************************************
+ *
+ * Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
+ * extended FADT-V5 sleep registers.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_HARDWARE
+ACPI_MODULE_NAME("hwesleep")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_execute_sleep_method
+ *
+ * PARAMETERS: method_pathname - Pathname of method to execute
+ * integer_argument - Argument to pass to the method
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Execute a sleep/wake related method with one integer argument
+ * and no return value.
+ *
+ ******************************************************************************/
+void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
+{
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(hw_execute_sleep_method);
+
+ /* One argument, integer_argument; No return value expected */
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = (u64)integer_argument;
+
+ status = acpi_evaluate_object(NULL, method_pathname, &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "While executing method %s",
+ method_pathname));
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_extended_sleep
+ *
+ * PARAMETERS: sleep_state - Which sleep state to enter
+ * Flags - ACPI_EXECUTE_GTS to run optional method
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enter a system sleep state via the extended FADT sleep
+ * registers (V5 FADT).
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
+{
+ acpi_status status;
+ u8 sleep_type_value;
+ u64 sleep_status;
+
+ ACPI_FUNCTION_TRACE(hw_extended_sleep);
+
+ /* Extended sleep registers must be valid */
+
+ if (!acpi_gbl_FADT.sleep_control.address ||
+ !acpi_gbl_FADT.sleep_status.address) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /* Clear wake status (WAK_STS) */
+
+ status = acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_gbl_system_awake_and_running = FALSE;
+
+ /* Optionally execute _GTS (Going To Sleep) */
+
+ if (flags & ACPI_EXECUTE_GTS) {
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
+ }
+
+ /* Flush caches, as per ACPI specification */
+
+ ACPI_FLUSH_CPU_CACHE();
+
+ /*
+ * Set the SLP_TYP and SLP_EN bits.
+ *
+ * Note: We only use the first value returned by the \_Sx method
+ * (acpi_gbl_sleep_type_a) - As per ACPI specification.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "Entering sleep state [S%u]\n", sleep_state));
+
+ sleep_type_value =
+ ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
+ ACPI_X_SLEEP_TYPE_MASK);
+
+ status = acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE),
+ &acpi_gbl_FADT.sleep_control);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Wait for transition back to Working State */
+
+ do {
+ status = acpi_read(&sleep_status, &acpi_gbl_FADT.sleep_status);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ } while (!(((u8)sleep_status) & ACPI_X_WAKE_STATUS));
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_extended_wake_prep
+ *
+ * PARAMETERS: sleep_state - Which sleep state we just exited
+ * Flags - ACPI_EXECUTE_BFS to run optional method
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Perform first part of OS-independent ACPI cleanup after
+ * a sleep. Called with interrupts ENABLED.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
+{
+ acpi_status status;
+ u8 sleep_type_value;
+
+ ACPI_FUNCTION_TRACE(hw_extended_wake_prep);
+
+ status = acpi_get_sleep_type_data(ACPI_STATE_S0,
+ &acpi_gbl_sleep_type_a,
+ &acpi_gbl_sleep_type_b);
+ if (ACPI_SUCCESS(status)) {
+ sleep_type_value =
+ ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
+ ACPI_X_SLEEP_TYPE_MASK);
+
+ (void)acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE),
+ &acpi_gbl_FADT.sleep_control);
+ }
+
+ /* Optionally execute _BFS (Back From Sleep) */
+
+ if (flags & ACPI_EXECUTE_BFS) {
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
+ }
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_extended_wake
+ *
+ * PARAMETERS: sleep_state - Which sleep state we just exited
+ * Flags - Reserved, set to zero
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
+ * Called with interrupts ENABLED.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags)
+{
+ ACPI_FUNCTION_TRACE(hw_extended_wake);
+
+ /* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */
+
+ acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID;
+
+ /* Execute the wake methods */
+
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WAKING);
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__WAK, sleep_state);
+
+ /*
+ * Some BIOS code assumes that WAK_STS will be cleared on resume
+ * and use it to determine whether the system is rebooting or
+ * resuming. Clear WAK_STS for compatibility.
+ */
+ (void)acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
+ acpi_gbl_system_awake_and_running = TRUE;
+
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 1a6894afef79..25bd28c4ae8d 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -48,7 +48,7 @@
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwgpe")
-
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/* Local prototypes */
static acpi_status
acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
@@ -479,3 +479,5 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block, NULL);
return_ACPI_STATUS(status);
}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 4ea4eeb51bfd..6b6c83b87b52 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -51,6 +51,7 @@
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwregs")
+#if (!ACPI_REDUCED_HARDWARE)
/* Local Prototypes */
static acpi_status
acpi_hw_read_multiple(u32 *value,
@@ -62,6 +63,8 @@ acpi_hw_write_multiple(u32 value,
struct acpi_generic_address *register_a,
struct acpi_generic_address *register_b);
+#endif /* !ACPI_REDUCED_HARDWARE */
+
/******************************************************************************
*
* FUNCTION: acpi_hw_validate_register
@@ -154,6 +157,7 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
{
u64 address;
+ u64 value64;
acpi_status status;
ACPI_FUNCTION_NAME(hw_read);
@@ -175,7 +179,9 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
*/
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
status = acpi_os_read_memory((acpi_physical_address)
- address, value, reg->bit_width);
+ address, &value64, reg->bit_width);
+
+ *value = (u32)value64;
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
status = acpi_hw_read_port((acpi_io_address)
@@ -225,7 +231,8 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
*/
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
status = acpi_os_write_memory((acpi_physical_address)
- address, value, reg->bit_width);
+ address, (u64)value,
+ reg->bit_width);
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
status = acpi_hw_write_port((acpi_io_address)
@@ -240,6 +247,7 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
return (status);
}
+#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
* FUNCTION: acpi_hw_clear_acpi_status
@@ -285,7 +293,7 @@ exit:
/*******************************************************************************
*
- * FUNCTION: acpi_hw_get_register_bit_mask
+ * FUNCTION: acpi_hw_get_bit_register_info
*
* PARAMETERS: register_id - Index of ACPI Register to access
*
@@ -658,3 +666,5 @@ acpi_hw_write_multiple(u32 value,
return (status);
}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
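
The hwregs.c change above follows an OSL widening made elsewhere in this series: acpi_os_read_memory()/acpi_os_write_memory() now take 64-bit values, while acpi_hw_read()/acpi_hw_write() still deal in u32. A hedged sketch of the resulting pattern (the wrapper name is hypothetical):

static acpi_status example_mem_read(struct acpi_generic_address *reg,
				    u64 address, u32 *value)
{
	u64 value64;
	acpi_status status;

	/* The OSL call is 64 bits wide; registers reached through
	 * acpi_hw_read() are at most 32 bits, so the truncation is lossless. */
	status = acpi_os_read_memory((acpi_physical_address)address,
				     &value64, reg->bit_width);
	if (ACPI_SUCCESS(status))
		*value = (u32)value64;

	return status;
}
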
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 3c4a922a9fc2..0ed85cac3231 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -1,7 +1,7 @@
-
/******************************************************************************
*
- * Name: hwsleep.c - ACPI Hardware Sleep/Wake Interface
+ * Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
+ * original/legacy sleep/PM registers.
*
*****************************************************************************/
@@ -43,213 +43,37 @@
*/
#include <acpi/acpi.h>
+#include <linux/acpi.h>
#include "accommon.h"
-#include "actables.h"
-#include <linux/tboot.h>
#include <linux/module.h>
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwsleep")
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/*******************************************************************************
*
- * FUNCTION: acpi_set_firmware_waking_vector
- *
- * PARAMETERS: physical_address - 32-bit physical address of ACPI real mode
- * entry point.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
- *
- ******************************************************************************/
-acpi_status
-acpi_set_firmware_waking_vector(u32 physical_address)
-{
- ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
-
-
- /*
- * According to the ACPI specification 2.0c and later, the 64-bit
- * waking vector should be cleared and the 32-bit waking vector should
- * be used, unless we want the wake-up code to be called by the BIOS in
- * Protected Mode. Some systems (for example HP dv5-1004nr) are known
- * to fail to resume if the 64-bit vector is used.
- */
-
- /* Set the 32-bit vector */
-
- acpi_gbl_FACS->firmware_waking_vector = physical_address;
-
- /* Clear the 64-bit vector if it exists */
-
- if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
- acpi_gbl_FACS->xfirmware_waking_vector = 0;
- }
-
- return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
-
-#if ACPI_MACHINE_WIDTH == 64
-/*******************************************************************************
- *
- * FUNCTION: acpi_set_firmware_waking_vector64
- *
- * PARAMETERS: physical_address - 64-bit physical address of ACPI protected
- * mode entry point.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if
- * it exists in the table. This function is intended for use with
- * 64-bit host operating systems.
- *
- ******************************************************************************/
-acpi_status
-acpi_set_firmware_waking_vector64(u64 physical_address)
-{
- ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
-
-
- /* Determine if the 64-bit vector actually exists */
-
- if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
- return_ACPI_STATUS(AE_NOT_EXIST);
- }
-
- /* Clear 32-bit vector, set the 64-bit X_ vector */
-
- acpi_gbl_FACS->firmware_waking_vector = 0;
- acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
-
- return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_enter_sleep_state_prep
- *
- * PARAMETERS: sleep_state - Which sleep state to enter
- *
- * RETURN: Status
- *
- * DESCRIPTION: Prepare to enter a system sleep state (see ACPI 2.0 spec p 231)
- * This function must execute with interrupts enabled.
- * We break sleeping into 2 stages so that OSPM can handle
- * various OS-specific tasks between the two steps.
- *
- ******************************************************************************/
-acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
-{
- acpi_status status;
- struct acpi_object_list arg_list;
- union acpi_object arg;
-
- ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
-
- /* _PSW methods could be run here to enable wake-on keyboard, LAN, etc. */
-
- status = acpi_get_sleep_type_data(sleep_state,
- &acpi_gbl_sleep_type_a,
- &acpi_gbl_sleep_type_b);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Setup parameter object */
-
- arg_list.count = 1;
- arg_list.pointer = &arg;
-
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = sleep_state;
-
- /* Run the _PTS method */
-
- status = acpi_evaluate_object(NULL, METHOD_NAME__PTS, &arg_list, NULL);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- return_ACPI_STATUS(status);
- }
-
- /* Setup the argument to _SST */
-
- switch (sleep_state) {
- case ACPI_STATE_S0:
- arg.integer.value = ACPI_SST_WORKING;
- break;
-
- case ACPI_STATE_S1:
- case ACPI_STATE_S2:
- case ACPI_STATE_S3:
- arg.integer.value = ACPI_SST_SLEEPING;
- break;
-
- case ACPI_STATE_S4:
- arg.integer.value = ACPI_SST_SLEEP_CONTEXT;
- break;
-
- default:
- arg.integer.value = ACPI_SST_INDICATOR_OFF; /* Default is off */
- break;
- }
-
- /*
- * Set the system indicators to show the desired sleep state.
- * _SST is an optional method (return no error if not found)
- */
- status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- ACPI_EXCEPTION((AE_INFO, status,
- "While executing method _SST"));
- }
-
- return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
-
-static unsigned int gts, bfs;
-module_param(gts, uint, 0644);
-module_param(bfs, uint, 0644);
-MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
-MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_enter_sleep_state
+ * FUNCTION: acpi_hw_legacy_sleep
*
* PARAMETERS: sleep_state - Which sleep state to enter
+ * Flags - ACPI_EXECUTE_GTS to run optional method
*
* RETURN: Status
*
- * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
+ * DESCRIPTION: Enter a system sleep state via the legacy FADT PM registers
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
{
- u32 pm1a_control;
- u32 pm1b_control;
struct acpi_bit_register_info *sleep_type_reg_info;
struct acpi_bit_register_info *sleep_enable_reg_info;
+ u32 pm1a_control;
+ u32 pm1b_control;
u32 in_value;
- struct acpi_object_list arg_list;
- union acpi_object arg;
acpi_status status;
- ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
-
- if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
- (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
- ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
- acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
- return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
- }
+ ACPI_FUNCTION_TRACE(hw_legacy_sleep);
sleep_type_reg_info =
acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE);
@@ -271,6 +95,18 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
return_ACPI_STATUS(status);
}
+ if (sleep_state != ACPI_STATE_S5) {
+ /*
+ * Disable BM arbitration. This feature is contained within an
+ * optional register (PM2 Control), so ignore a BAD_ADDRESS
+ * exception.
+ */
+ status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
+ if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
/*
* 1) Disable/Clear all GPEs
* 2) Enable all wakeup GPEs
@@ -286,18 +122,10 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
return_ACPI_STATUS(status);
}
- if (gts) {
- /* Execute the _GTS method */
-
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = sleep_state;
+ /* Optionally execute _GTS (Going To Sleep) */
- status = acpi_evaluate_object(NULL, METHOD_NAME__GTS, &arg_list, NULL);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- return_ACPI_STATUS(status);
- }
+ if (flags & ACPI_EXECUTE_GTS) {
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
}
/* Get current value of PM1A control */
@@ -344,8 +172,12 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
ACPI_FLUSH_CPU_CACHE();
- tboot_sleep(sleep_state, pm1a_control, pm1b_control);
-
+ status = acpi_os_prepare_sleep(sleep_state, pm1a_control,
+ pm1b_control);
+ if (ACPI_SKIP(status))
+ return_ACPI_STATUS(AE_OK);
+ if (ACPI_FAILURE(status))
+ return_ACPI_STATUS(status);
/* Write #2: Write both SLP_TYP + SLP_EN */
status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
@@ -375,114 +207,44 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
}
}
- /* Wait until we enter sleep state */
-
- do {
- status = acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS,
- &in_value);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Spin until we wake */
-
- } while (!in_value);
-
- return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_enter_sleep_state_s4bios
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Perform a S4 bios request.
- * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
- *
- ******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
-{
- u32 in_value;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
-
- /* Clear the wake status bit (PM1) */
-
- status =
- acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- status = acpi_hw_clear_acpi_status();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * 1) Disable/Clear all GPEs
- * 2) Enable all wakeup GPEs
- */
- status = acpi_hw_disable_all_gpes();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- acpi_gbl_system_awake_and_running = FALSE;
-
- status = acpi_hw_enable_all_wakeup_gpes();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- ACPI_FLUSH_CPU_CACHE();
-
- status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
- (u32) acpi_gbl_FADT.S4bios_request, 8);
+ /* Wait for transition back to Working State */
do {
- acpi_os_stall(1000);
status =
acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
+
} while (!in_value);
return_ACPI_STATUS(AE_OK);
}
-ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
-
/*******************************************************************************
*
- * FUNCTION: acpi_leave_sleep_state_prep
+ * FUNCTION: acpi_hw_legacy_wake_prep
*
- * PARAMETERS: sleep_state - Which sleep state we are exiting
+ * PARAMETERS: sleep_state - Which sleep state we just exited
+ * Flags - ACPI_EXECUTE_BFS to run optional method
*
* RETURN: Status
*
* DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a
* sleep.
- * Called with interrupts DISABLED.
+ * Called with interrupts ENABLED.
*
******************************************************************************/
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
+
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
{
- struct acpi_object_list arg_list;
- union acpi_object arg;
acpi_status status;
struct acpi_bit_register_info *sleep_type_reg_info;
struct acpi_bit_register_info *sleep_enable_reg_info;
u32 pm1a_control;
u32 pm1b_control;
- ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
+ ACPI_FUNCTION_TRACE(hw_legacy_wake_prep);
/*
* Set SLP_TYPE and SLP_EN to state S0.
@@ -525,27 +287,20 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
}
}
- if (bfs) {
- /* Execute the _BFS method */
+ /* Optionally execute _BFS (Back From Sleep) */
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = sleep_state;
-
- status = acpi_evaluate_object(NULL, METHOD_NAME__BFS, &arg_list, NULL);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- ACPI_EXCEPTION((AE_INFO, status, "During Method _BFS"));
- }
+ if (flags & ACPI_EXECUTE_BFS) {
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
- * FUNCTION: acpi_leave_sleep_state
+ * FUNCTION: acpi_hw_legacy_wake
*
* PARAMETERS: sleep_state - Which sleep state we just exited
+ * Flags - Reserved, set to zero
*
* RETURN: Status
*
@@ -553,31 +308,17 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
* Called with interrupts ENABLED.
*
******************************************************************************/
-acpi_status acpi_leave_sleep_state(u8 sleep_state)
+
+acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
{
- struct acpi_object_list arg_list;
- union acpi_object arg;
acpi_status status;
- ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
+ ACPI_FUNCTION_TRACE(hw_legacy_wake);
/* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */
acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID;
-
- /* Setup parameter object */
-
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
-
- /* Ignore any errors from these methods */
-
- arg.integer.value = ACPI_SST_WAKING;
- status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
- }
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WAKING);
/*
* GPEs must be enabled before _WAK is called as GPEs
@@ -591,46 +332,50 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
+
status = acpi_hw_enable_all_runtime_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- arg.integer.value = sleep_state;
- status = acpi_evaluate_object(NULL, METHOD_NAME__WAK, &arg_list, NULL);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- ACPI_EXCEPTION((AE_INFO, status, "During Method _WAK"));
- }
- /* TBD: _WAK "sometimes" returns stuff - do we want to look at it? */
+ /*
+ * Now we can execute _WAK, etc. Some machines require that the GPEs
+ * are enabled before the wake methods are executed.
+ */
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__WAK, sleep_state);
/*
- * Some BIOSes assume that WAK_STS will be cleared on resume and use
- * it to determine whether the system is rebooting or resuming. Clear
- * it for compatibility.
+ * Some BIOS code assumes that WAK_STS will be cleared on resume
+ * and use it to determine whether the system is rebooting or
+ * resuming. Clear WAK_STS for compatibility.
*/
acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, 1);
-
acpi_gbl_system_awake_and_running = TRUE;
/* Enable power button */
(void)
acpi_write_bit_register(acpi_gbl_fixed_event_info
- [ACPI_EVENT_POWER_BUTTON].
- enable_register_id, ACPI_ENABLE_EVENT);
+ [ACPI_EVENT_POWER_BUTTON].
+ enable_register_id, ACPI_ENABLE_EVENT);
(void)
acpi_write_bit_register(acpi_gbl_fixed_event_info
- [ACPI_EVENT_POWER_BUTTON].
- status_register_id, ACPI_CLEAR_STATUS);
+ [ACPI_EVENT_POWER_BUTTON].
+ status_register_id, ACPI_CLEAR_STATUS);
- arg.integer.value = ACPI_SST_WORKING;
- status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
+ /*
+ * Enable BM arbitration. This feature is contained within an
+ * optional register (PM2 Control), so ignore a BAD_ADDRESS
+ * exception.
+ */
+ status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
+ if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
+ return_ACPI_STATUS(status);
}
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
+#endif /* !ACPI_REDUCED_HARDWARE */
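
With the gts/bfs module parameters removed, the decision to evaluate _GTS and _BFS now arrives as flags from the external entry points collected in hwxfsleep.c (below). A hedged caller-side sketch; ACPI_EXECUTE_GTS is assumed to be defined in the public headers added by this series, and the function name is hypothetical:

static acpi_status example_enter_s3(u8 run_gts)
{
	u8 flags = 0;

	if (run_gts)
		flags |= ACPI_EXECUTE_GTS;

	/* Must be called with interrupts disabled, per the header comment */
	return acpi_hw_legacy_sleep(ACPI_STATE_S3, flags);
}
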
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index d4973d9da9f1..f1b2c3b94cac 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -49,6 +49,7 @@
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwtimer")
+#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/******************************************************************************
*
* FUNCTION: acpi_get_timer_resolution
@@ -187,3 +188,4 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
}
ACPI_EXPORT_SYMBOL(acpi_get_timer_duration)
+#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 9d38eb6c0d0b..ab513a972c95 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -74,8 +74,7 @@ acpi_status acpi_reset(void)
/* Check if the reset register is supported */
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ||
- !reset_reg->address) {
+ if (!reset_reg->address) {
return_ACPI_STATUS(AE_NOT_EXIST);
}
@@ -138,11 +137,6 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
return (status);
}
- width = reg->bit_width;
- if (width == 64) {
- width = 32; /* Break into two 32-bit transfers */
- }
-
/* Initialize entire 64-bit return value to zero */
*return_value = 0;
@@ -154,24 +148,17 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
*/
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
status = acpi_os_read_memory((acpi_physical_address)
- address, &value, width);
+ address, return_value,
+ reg->bit_width);
if (ACPI_FAILURE(status)) {
return (status);
}
- *return_value = value;
-
- if (reg->bit_width == 64) {
-
- /* Read the top 32 bits */
+ } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
- status = acpi_os_read_memory((acpi_physical_address)
- (address + 4), &value, 32);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- *return_value |= ((u64)value << 32);
+ width = reg->bit_width;
+ if (width == 64) {
+ width = 32; /* Break into two 32-bit transfers */
}
- } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
status = acpi_hw_read_port((acpi_io_address)
address, &value, width);
@@ -231,32 +218,22 @@ acpi_status acpi_write(u64 value, struct acpi_generic_address *reg)
return (status);
}
- width = reg->bit_width;
- if (width == 64) {
- width = 32; /* Break into two 32-bit transfers */
- }
-
/*
* Two address spaces supported: Memory or IO. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
status = acpi_os_write_memory((acpi_physical_address)
- address, ACPI_LODWORD(value),
- width);
+ address, value, reg->bit_width);
if (ACPI_FAILURE(status)) {
return (status);
}
+ } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
- if (reg->bit_width == 64) {
- status = acpi_os_write_memory((acpi_physical_address)
- (address + 4),
- ACPI_HIDWORD(value), 32);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
+ width = reg->bit_width;
+ if (width == 64) {
+ width = 32; /* Break into two 32-bit transfers */
}
- } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
status = acpi_hw_write_port((acpi_io_address)
address, ACPI_LODWORD(value),
@@ -286,6 +263,7 @@ acpi_status acpi_write(u64 value, struct acpi_generic_address *reg)
ACPI_EXPORT_SYMBOL(acpi_write)
+#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
* FUNCTION: acpi_read_bit_register
@@ -453,7 +431,7 @@ unlock_and_exit:
}
ACPI_EXPORT_SYMBOL(acpi_write_bit_register)
-
+#endif /* !ACPI_REDUCED_HARDWARE */
/*******************************************************************************
*
* FUNCTION: acpi_get_sleep_type_data
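For context (not part of the patch): after this change the memory-space path hands the full 64-bit value to acpi_os_read_memory()/acpi_os_write_memory() and lets the OSL pick the access size, while port I/O keeps the old behaviour of splitting a 64-bit access into two 32-bit transfers. A minimal stand-alone sketch of that split; LODWORD/HIDWORD and port_write() are local stand-ins for the ACPI macros and acpi_hw_write_port():

#include <stdint.h>
#include <stdio.h>

#define LODWORD(v) ((uint32_t)((v) & 0xFFFFFFFFu))
#define HIDWORD(v) ((uint32_t)(((v) >> 32) & 0xFFFFFFFFu))

/* Stand-in for a 32-bit-wide port write such as acpi_hw_write_port() */
static void port_write(uint64_t address, uint32_t value, uint32_t width)
{
        printf("write 0x%08x (%u bits) to port 0x%llx\n",
               value, width, (unsigned long long)address);
}

static void write_gas_io(uint64_t address, uint64_t value, uint32_t bit_width)
{
        uint32_t width = bit_width;

        if (width == 64)
                width = 32;             /* break into two 32-bit transfers */

        port_write(address, LODWORD(value), width);
        if (bit_width == 64)
                port_write(address + 4, HIDWORD(value), 32);
}

int main(void)
{
        write_gas_io(0x1000, 0x1122334455667788ULL, 64);
        return 0;
}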
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
new file mode 100644
index 000000000000..762d059bb508
--- /dev/null
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -0,0 +1,431 @@
+/******************************************************************************
+ *
+ * Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include <linux/module.h>
+
+#define _COMPONENT ACPI_HARDWARE
+ACPI_MODULE_NAME("hwxfsleep")
+
+/* Local prototypes */
+static acpi_status
+acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id);
+
+/*
+ * Dispatch table used to efficiently branch to the various sleep
+ * functions.
+ */
+#define ACPI_SLEEP_FUNCTION_ID 0
+#define ACPI_WAKE_PREP_FUNCTION_ID 1
+#define ACPI_WAKE_FUNCTION_ID 2
+
+/* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
+
+static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
+ {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
+ acpi_hw_extended_sleep},
+ {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
+ acpi_hw_extended_wake_prep},
+ {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
+};
+
+/*
+ * These functions are removed for the ACPI_REDUCED_HARDWARE case:
+ * acpi_set_firmware_waking_vector
+ * acpi_set_firmware_waking_vector64
+ * acpi_enter_sleep_state_s4bios
+ */
+
+#if (!ACPI_REDUCED_HARDWARE)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_set_firmware_waking_vector
+ *
+ * PARAMETERS: physical_address - 32-bit physical address of ACPI real mode
+ * entry point.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
+ *
+ ******************************************************************************/
+
+acpi_status acpi_set_firmware_waking_vector(u32 physical_address)
+{
+ ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
+
+
+ /*
+ * According to the ACPI specification 2.0c and later, the 64-bit
+ * waking vector should be cleared and the 32-bit waking vector should
+ * be used, unless we want the wake-up code to be called by the BIOS in
+ * Protected Mode. Some systems (for example HP dv5-1004nr) are known
+ * to fail to resume if the 64-bit vector is used.
+ */
+
+ /* Set the 32-bit vector */
+
+ acpi_gbl_FACS->firmware_waking_vector = physical_address;
+
+ /* Clear the 64-bit vector if it exists */
+
+ if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
+ acpi_gbl_FACS->xfirmware_waking_vector = 0;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
+
+#if ACPI_MACHINE_WIDTH == 64
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_set_firmware_waking_vector64
+ *
+ * PARAMETERS: physical_address - 64-bit physical address of ACPI protected
+ * mode entry point.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if
+ * it exists in the table. This function is intended for use with
+ * 64-bit host operating systems.
+ *
+ ******************************************************************************/
+acpi_status acpi_set_firmware_waking_vector64(u64 physical_address)
+{
+ ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
+
+
+ /* Determine if the 64-bit vector actually exists */
+
+ if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /* Clear 32-bit vector, set the 64-bit X_ vector */
+
+ acpi_gbl_FACS->firmware_waking_vector = 0;
+ acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enter_sleep_state_s4bios
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Perform an S4 BIOS request.
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
+ *
+ ******************************************************************************/
+acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
+{
+ u32 in_value;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
+
+ /* Clear the wake status bit (PM1) */
+
+ status =
+ acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_hw_clear_acpi_status();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * 1) Disable/Clear all GPEs
+ * 2) Enable all wakeup GPEs
+ */
+ status = acpi_hw_disable_all_gpes();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ acpi_gbl_system_awake_and_running = FALSE;
+
+ status = acpi_hw_enable_all_wakeup_gpes();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_FLUSH_CPU_CACHE();
+
+ status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
+ (u32)acpi_gbl_FADT.S4bios_request, 8);
+
+ do {
+ acpi_os_stall(1000);
+ status =
+ acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ } while (!in_value);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
+#endif /* !ACPI_REDUCED_HARDWARE */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_sleep_dispatch
+ *
+ * PARAMETERS: sleep_state - Which sleep state to enter/exit
+ * function_id - Sleep, wake_prep, or Wake
+ *
+ * RETURN: Status from the invoked sleep handling function.
+ *
+ * DESCRIPTION: Dispatch a sleep/wake request to the appropriate handling
+ * function.
+ *
+ ******************************************************************************/
+static acpi_status
+acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
+{
+ acpi_status status;
+ struct acpi_sleep_functions *sleep_functions =
+ &acpi_sleep_dispatch[function_id];
+
+#if (!ACPI_REDUCED_HARDWARE)
+
+ /*
+ * If the Hardware Reduced flag is set (from the FADT), we must
+ * use the extended sleep registers
+ */
+ if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
+ status = sleep_functions->extended_function(sleep_state, flags);
+ } else {
+ /* Legacy sleep */
+
+ status = sleep_functions->legacy_function(sleep_state, flags);
+ }
+
+ return (status);
+
+#else
+ /*
+ * For the case where reduced-hardware-only code is being generated,
+ * we know that only the extended sleep registers are available
+ */
+ status = sleep_functions->extended_function(sleep_state, flags);
+ return (status);
+
+#endif /* !ACPI_REDUCED_HARDWARE */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enter_sleep_state_prep
+ *
+ * PARAMETERS: sleep_state - Which sleep state to enter
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Prepare to enter a system sleep state.
+ * This function must execute with interrupts enabled.
+ * We break sleeping into 2 stages so that OSPM can handle
+ * various OS-specific tasks between the two steps.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
+{
+ acpi_status status;
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ u32 sst_value;
+
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
+
+ status = acpi_get_sleep_type_data(sleep_state,
+ &acpi_gbl_sleep_type_a,
+ &acpi_gbl_sleep_type_b);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Execute the _PTS method (Prepare To Sleep) */
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = sleep_state;
+
+ status =
+ acpi_evaluate_object(NULL, METHOD_PATHNAME__PTS, &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Setup the argument to the _SST method (System STatus) */
+
+ switch (sleep_state) {
+ case ACPI_STATE_S0:
+ sst_value = ACPI_SST_WORKING;
+ break;
+
+ case ACPI_STATE_S1:
+ case ACPI_STATE_S2:
+ case ACPI_STATE_S3:
+ sst_value = ACPI_SST_SLEEPING;
+ break;
+
+ case ACPI_STATE_S4:
+ sst_value = ACPI_SST_SLEEP_CONTEXT;
+ break;
+
+ default:
+ sst_value = ACPI_SST_INDICATOR_OFF; /* Default is off */
+ break;
+ }
+
+ /*
+ * Set the system indicators to show the desired sleep state.
+ * _SST is an optional method (return no error if not found)
+ */
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, sst_value);
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enter_sleep_state
+ *
+ * PARAMETERS: sleep_state - Which sleep state to enter
+ * Flags - ACPI_EXECUTE_GTS to run optional method
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
+ *
+ ******************************************************************************/
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
+
+ if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
+ (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
+ ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
+ acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
+ return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+ }
+
+ status =
+ acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_leave_sleep_state_prep
+ *
+ * PARAMETERS: sleep_state - Which sleep state we are exiting
+ * Flags - ACPI_EXECUTE_BFS to run optional method
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Perform the first stage of OS-independent ACPI cleanup after a
+ * sleep.
+ * Called with interrupts DISABLED.
+ *
+ ******************************************************************************/
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
+
+ status =
+ acpi_hw_sleep_dispatch(sleep_state, flags,
+ ACPI_WAKE_PREP_FUNCTION_ID);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state_prep)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_leave_sleep_state
+ *
+ * PARAMETERS: sleep_state - Which sleep state we are exiting
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
+ * Called with interrupts ENABLED.
+ *
+ ******************************************************************************/
+acpi_status acpi_leave_sleep_state(u8 sleep_state)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
+
+
+ status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
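A compact, self-contained sketch of the dispatch-table idea used by the new file: one table entry per operation (sleep, wake-prep, wake), each holding an optional legacy handler and an extended handler, with the extended path taken on hardware-reduced platforms or when the FADT supplies the sleep-control register. All names below are placeholders rather than the ACPICA symbols:

#include <stdio.h>

typedef int (*sleep_fn)(unsigned char sleep_state, unsigned char flags);

struct sleep_functions {
        sleep_fn legacy_function;       /* may be NULL in reduced-hardware builds */
        sleep_fn extended_function;
};

/* One stub per path; a real table would hold six distinct handlers */
static int legacy_op(unsigned char s, unsigned char f)   { printf("legacy op, S%u\n", s); return 0; }
static int extended_op(unsigned char s, unsigned char f) { printf("extended op, S%u\n", s); return 0; }

enum { SLEEP_FUNCTION_ID, WAKE_PREP_FUNCTION_ID, WAKE_FUNCTION_ID };

static const struct sleep_functions dispatch[] = {
        [SLEEP_FUNCTION_ID]     = { legacy_op, extended_op },
        [WAKE_PREP_FUNCTION_ID] = { legacy_op, extended_op },
        [WAKE_FUNCTION_ID]      = { legacy_op, extended_op },
};

static int sleep_dispatch(unsigned char sleep_state, unsigned char flags,
                          unsigned int function_id, int use_extended)
{
        const struct sleep_functions *fns = &dispatch[function_id];

        if (use_extended || !fns->legacy_function)
                return fns->extended_function(sleep_state, flags);
        return fns->legacy_function(sleep_state, flags);
}

int main(void)
{
        sleep_dispatch(3, 0, SLEEP_FUNCTION_ID, 0);     /* legacy FADT registers */
        sleep_dispatch(3, 0, SLEEP_FUNCTION_ID, 1);     /* hardware-reduced platform */
        return 0;
}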
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index b7f2b3be79ac..3f7f3f6e7dd5 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -242,7 +242,20 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
if (!obj_desc) {
- /* No attached object, we are done */
+ /* No attached object. Some types should always have an object */
+
+ switch (type) {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_PACKAGE:
+ case ACPI_TYPE_BUFFER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_METHOD:
+ acpi_os_printf("<No attached object>");
+ break;
+
+ default:
+ break;
+ }
acpi_os_printf("\n");
return (AE_OK);
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 30ea5bc53a78..3b5acb0eb406 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -121,7 +121,7 @@ void acpi_ns_dump_root_devices(void)
return;
}
- status = acpi_get_handle(NULL, ACPI_NS_SYSTEM_BUS, &sys_bus_handle);
+ status = acpi_get_handle(NULL, METHOD_NAME__SB_, &sys_bus_handle);
if (ACPI_FAILURE(status)) {
return;
}
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index bbe46a447d34..23ce09686418 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -638,8 +638,8 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
/* Create the new outer package and populate it */
status =
- acpi_ns_repair_package_list(data,
- return_object_ptr);
+ acpi_ns_wrap_with_package(data, *elements,
+ return_object_ptr);
if (ACPI_FAILURE(status)) {
return (status);
}
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 9c35d20eb52b..5519a64a353f 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -71,11 +71,10 @@ ACPI_MODULE_NAME("nsrepair")
* Buffer -> String
* Buffer -> Package of Integers
* Package -> Package of one Package
+ * An incorrect standalone object is wrapped with required outer package
*
* Additional possible repairs:
- *
* Required package elements that are NULL replaced by Integer/String/Buffer
- * Incorrect standalone package wrapped with required outer package
*
******************************************************************************/
/* Local prototypes */
@@ -91,10 +90,6 @@ static acpi_status
acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
union acpi_operand_object **return_object);
-static acpi_status
-acpi_ns_convert_to_package(union acpi_operand_object *original_object,
- union acpi_operand_object **return_object);
-
/*******************************************************************************
*
* FUNCTION: acpi_ns_repair_object
@@ -151,9 +146,24 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
}
}
if (expected_btypes & ACPI_RTYPE_PACKAGE) {
- status = acpi_ns_convert_to_package(return_object, &new_object);
+ /*
+ * A package is expected. We will wrap the existing object with a
+ * new package object. It is often the case that if a variable-length
+ * package is required, but there is only a single object needed, the
+ * BIOS will return that object instead of wrapping it with a Package
+ * object. Note: after the wrapping, the package will be validated
+ * for correct contents (expected object type or types).
+ */
+ status =
+ acpi_ns_wrap_with_package(data, return_object, &new_object);
if (ACPI_SUCCESS(status)) {
- goto object_repaired;
+ /*
+ * The original object just had its reference count
+ * incremented for being inserted into the new package.
+ */
+ *return_object_ptr = new_object; /* New Package object */
+ data->flags |= ACPI_OBJECT_REPAIRED;
+ return (AE_OK);
}
}
@@ -165,22 +175,27 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
/* Object was successfully repaired */
- /*
- * If the original object is a package element, we need to:
- * 1. Set the reference count of the new object to match the
- * reference count of the old object.
- * 2. Decrement the reference count of the original object.
- */
if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
- new_object->common.reference_count =
- return_object->common.reference_count;
+ /*
+ * The original object is a package element. We need to
+ * decrement the reference count of the original object,
+ * for removing it from the package.
+ *
+ * However, if the original object was just wrapped with a
+ * package object as part of the repair, we don't need to
+ * change the reference count.
+ */
+ if (!(data->flags & ACPI_OBJECT_WRAPPED)) {
+ new_object->common.reference_count =
+ return_object->common.reference_count;
- if (return_object->common.reference_count > 1) {
- return_object->common.reference_count--;
+ if (return_object->common.reference_count > 1) {
+ return_object->common.reference_count--;
+ }
}
ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
- "%s: Converted %s to expected %s at index %u\n",
+ "%s: Converted %s to expected %s at Package index %u\n",
data->pathname,
acpi_ut_get_object_type_name(return_object),
acpi_ut_get_object_type_name(new_object),
@@ -453,65 +468,6 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
/*******************************************************************************
*
- * FUNCTION: acpi_ns_convert_to_package
- *
- * PARAMETERS: original_object - Object to be converted
- * return_object - Where the new converted object is returned
- *
- * RETURN: Status. AE_OK if conversion was successful.
- *
- * DESCRIPTION: Attempt to convert a Buffer object to a Package. Each byte of
- * the buffer is converted to a single integer package element.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_convert_to_package(union acpi_operand_object *original_object,
- union acpi_operand_object **return_object)
-{
- union acpi_operand_object *new_object;
- union acpi_operand_object **elements;
- u32 length;
- u8 *buffer;
-
- switch (original_object->common.type) {
- case ACPI_TYPE_BUFFER:
-
- /* Buffer-to-Package conversion */
-
- length = original_object->buffer.length;
- new_object = acpi_ut_create_package_object(length);
- if (!new_object) {
- return (AE_NO_MEMORY);
- }
-
- /* Convert each buffer byte to an integer package element */
-
- elements = new_object->package.elements;
- buffer = original_object->buffer.pointer;
-
- while (length--) {
- *elements =
- acpi_ut_create_integer_object((u64) *buffer);
- if (!*elements) {
- acpi_ut_remove_reference(new_object);
- return (AE_NO_MEMORY);
- }
- elements++;
- buffer++;
- }
- break;
-
- default:
- return (AE_AML_OPERAND_TYPE);
- }
-
- *return_object = new_object;
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_repair_null_element
*
* PARAMETERS: Data - Pointer to validation data structure
@@ -677,55 +633,56 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
/*******************************************************************************
*
- * FUNCTION: acpi_ns_repair_package_list
+ * FUNCTION: acpi_ns_wrap_with_package
*
* PARAMETERS: Data - Pointer to validation data structure
- * obj_desc_ptr - Pointer to the object to repair. The new
- * package object is returned here,
- * overwriting the old object.
+ * original_object - Pointer to the object to repair.
+ * obj_desc_ptr - The new package object is returned here
*
* RETURN: Status, new object in *obj_desc_ptr
*
- * DESCRIPTION: Repair a common problem with objects that are defined to return
- * a variable-length Package of Packages. If the variable-length
- * is one, some BIOS code mistakenly simply declares a single
- * Package instead of a Package with one sub-Package. This
- * function attempts to repair this error by wrapping a Package
- * object around the original Package, creating the correct
- * Package with one sub-Package.
+ * DESCRIPTION: Repair a common problem with objects that are defined to
+ * return a variable-length Package of sub-objects. If there is
+ * only one sub-object, some BIOS code mistakenly simply declares
+ * the single object instead of a Package with one sub-object.
+ * This function attempts to repair this error by wrapping a
+ * Package object around the original object, creating the
+ * correct and expected Package with one sub-object.
*
* Names that can be repaired in this manner include:
- * _ALR, _CSD, _HPX, _MLS, _PRT, _PSS, _TRT, TSS
+ * _ALR, _CSD, _HPX, _MLS, _PLD, _PRT, _PSS, _TRT, _TSS,
+ * _BCL, _DOD, _FIX, _Sx
*
******************************************************************************/
acpi_status
-acpi_ns_repair_package_list(struct acpi_predefined_data *data,
- union acpi_operand_object **obj_desc_ptr)
+acpi_ns_wrap_with_package(struct acpi_predefined_data *data,
+ union acpi_operand_object *original_object,
+ union acpi_operand_object **obj_desc_ptr)
{
union acpi_operand_object *pkg_obj_desc;
- ACPI_FUNCTION_NAME(ns_repair_package_list);
+ ACPI_FUNCTION_NAME(ns_wrap_with_package);
/*
* Create the new outer package and populate it. The new package will
- * have a single element, the lone subpackage.
+ * have a single element, the lone sub-object.
*/
pkg_obj_desc = acpi_ut_create_package_object(1);
if (!pkg_obj_desc) {
return (AE_NO_MEMORY);
}
- pkg_obj_desc->package.elements[0] = *obj_desc_ptr;
+ pkg_obj_desc->package.elements[0] = original_object;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Wrapped %s with expected Package object\n",
+ data->pathname,
+ acpi_ut_get_object_type_name(original_object)));
/* Return the new object in the object pointer */
*obj_desc_ptr = pkg_obj_desc;
- data->flags |= ACPI_OBJECT_REPAIRED;
-
- ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
- "%s: Repaired incorrectly formed Package\n",
- data->pathname));
-
+ data->flags |= ACPI_OBJECT_REPAIRED | ACPI_OBJECT_WRAPPED;
return (AE_OK);
}
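A toy illustration of the repair case described above, with invented types and flag values: the lone return object is dropped into a freshly created one-element Package and the validation data is flagged as both repaired and wrapped, which is exactly why the later reference-count adjustment is skipped for wrapped objects:

#include <stdio.h>
#include <stdlib.h>

#define OBJECT_REPAIRED 0x01
#define OBJECT_WRAPPED  0x02

struct object {
        const char *type;               /* "Integer", "Package", ... */
        struct object **elements;
        unsigned int count;
};

/* Wrap a lone return object in a one-element Package */
static struct object *wrap_with_package(struct object *original, unsigned int *flags)
{
        struct object *pkg = calloc(1, sizeof(*pkg));

        if (!pkg)
                return NULL;
        pkg->elements = calloc(1, sizeof(*pkg->elements));
        if (!pkg->elements) {
                free(pkg);
                return NULL;
        }
        pkg->type = "Package";
        pkg->elements[0] = original;
        pkg->count = 1;

        *flags |= OBJECT_REPAIRED | OBJECT_WRAPPED;
        return pkg;
}

int main(void)
{
        struct object lone = { .type = "Integer" };
        unsigned int flags = 0;
        struct object *fixed = wrap_with_package(&lone, &flags);

        if (fixed) {
                printf("now a %s with %u element(s), flags 0x%x\n",
                       fixed->type, fixed->count, flags);
                free(fixed->elements);
                free(fixed);
        }
        return 0;
}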
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index a535b7afda5c..75113759f69d 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -341,7 +341,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
if (!acpi_ns_valid_path_separator(*external_name) &&
(*external_name != 0)) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
+ return_ACPI_STATUS(AE_BAD_PATHNAME);
}
/* Move on the next segment */
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index c5d870406f41..4c9c760db4a4 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -363,10 +363,6 @@ static void acpi_tb_convert_fadt(void)
u32 address32;
u32 i;
- /* Update the local FADT table header length */
-
- acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
-
/*
* Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
* Later code will always use the X 64-bit field. Also, check for an
@@ -408,6 +404,10 @@ static void acpi_tb_convert_fadt(void)
acpi_gbl_FADT.boot_flags = 0;
}
+ /* Update the local FADT table header length */
+
+ acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
+
/*
* Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X"
* generic address structures as necessary. Later code will always use
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 1aecf7baa4e0..c03500b4cc7a 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -114,7 +114,6 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
{
u32 i;
acpi_status status = AE_OK;
- struct acpi_table_header *override_table = NULL;
ACPI_FUNCTION_TRACE(tb_add_table);
@@ -224,25 +223,10 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
/*
* ACPI Table Override:
* Allow the host to override dynamically loaded tables.
+ * NOTE: the table is fully mapped at this point, and the mapping will
+ * be deleted by tb_table_override if the table is actually overridden.
*/
- status = acpi_os_table_override(table_desc->pointer, &override_table);
- if (ACPI_SUCCESS(status) && override_table) {
- ACPI_INFO((AE_INFO,
- "%4.4s @ 0x%p Table override, replaced with:",
- table_desc->pointer->signature,
- ACPI_CAST_PTR(void, table_desc->address)));
-
- /* We can delete the table that was passed as a parameter */
-
- acpi_tb_delete_table(table_desc);
-
- /* Setup descriptor for the new table */
-
- table_desc->address = ACPI_PTR_TO_PHYSADDR(override_table);
- table_desc->pointer = override_table;
- table_desc->length = override_table->length;
- table_desc->flags = ACPI_TABLE_ORIGIN_OVERRIDE;
- }
+ (void)acpi_tb_table_override(table_desc->pointer, table_desc);
/* Add the table to the global root table list */
@@ -263,6 +247,95 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
/*******************************************************************************
*
+ * FUNCTION: acpi_tb_table_override
+ *
+ * PARAMETERS: table_header - Header for the original table
+ * table_desc - Table descriptor initialized for the
+ * original table. May or may not be mapped.
+ *
+ * RETURN: Pointer to the entire new table. NULL if table not overridden.
+ * If overridden, installs the new table within the input table
+ * descriptor.
+ *
+ * DESCRIPTION: Attempt table override by calling the OSL override functions.
+ * Note: If the table is overridden, then the entire new table
+ * is mapped and returned by this function.
+ *
+ ******************************************************************************/
+
+struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
+ *table_header,
+ struct acpi_table_desc
+ *table_desc)
+{
+ acpi_status status;
+ struct acpi_table_header *new_table = NULL;
+ acpi_physical_address new_address = 0;
+ u32 new_table_length = 0;
+ u8 new_flags;
+ char *override_type;
+
+ /* (1) Attempt logical override (returns a logical address) */
+
+ status = acpi_os_table_override(table_header, &new_table);
+ if (ACPI_SUCCESS(status) && new_table) {
+ new_address = ACPI_PTR_TO_PHYSADDR(new_table);
+ new_table_length = new_table->length;
+ new_flags = ACPI_TABLE_ORIGIN_OVERRIDE;
+ override_type = "Logical";
+ goto finish_override;
+ }
+
+ /* (2) Attempt physical override (returns a physical address) */
+
+ status = acpi_os_physical_table_override(table_header,
+ &new_address,
+ &new_table_length);
+ if (ACPI_SUCCESS(status) && new_address && new_table_length) {
+
+ /* Map the entire new table */
+
+ new_table = acpi_os_map_memory(new_address, new_table_length);
+ if (!new_table) {
+ ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
+ "%4.4s %p Attempted physical table override failed",
+ table_header->signature,
+ ACPI_CAST_PTR(void,
+ table_desc->address)));
+ return (NULL);
+ }
+
+ override_type = "Physical";
+ new_flags = ACPI_TABLE_ORIGIN_MAPPED;
+ goto finish_override;
+ }
+
+ return (NULL); /* There was no override */
+
+ finish_override:
+
+ ACPI_INFO((AE_INFO,
+ "%4.4s %p %s table override, new table: %p",
+ table_header->signature,
+ ACPI_CAST_PTR(void, table_desc->address),
+ override_type, new_table));
+
+ /* We can now unmap/delete the original table (if fully mapped) */
+
+ acpi_tb_delete_table(table_desc);
+
+ /* Setup descriptor for the new table */
+
+ table_desc->address = new_address;
+ table_desc->pointer = new_table;
+ table_desc->length = new_table_length;
+ table_desc->flags = new_flags;
+
+ return (new_table);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_tb_resize_root_table_list
*
* PARAMETERS: None
@@ -396,7 +469,11 @@ void acpi_tb_delete_table(struct acpi_table_desc *table_desc)
case ACPI_TABLE_ORIGIN_ALLOCATED:
ACPI_FREE(table_desc->pointer);
break;
- default:;
+
+ /* Not mapped or allocated, there is nothing we can do */
+
+ default:
+ return;
}
table_desc->pointer = NULL;
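A hypothetical host-side counterpart (not part of this patch): an acpi_os_physical_table_override() that returns a physical address and length for a replacement DSDT staged by early boot code, leaving the mapping to acpi_tb_table_override(). my_override_phys and my_override_len are invented placeholders; the stock Linux implementation added to osl.c later in this series simply returns AE_SUPPORT:

#include <linux/string.h>
#include <acpi/acpi.h>

static acpi_physical_address my_override_phys; /* filled in by early boot code */
static u32 my_override_len;

acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
                                acpi_physical_address *new_address,
                                u32 *new_table_length)
{
        /* Only replace the DSDT, and only if a staged copy exists */
        if (!my_override_phys ||
            memcmp(existing_table->signature, "DSDT", 4))
                return AE_SUPPORT;              /* no override */

        /* Hand back a physical address; the caller maps the new table itself */
        *new_address = my_override_phys;
        *new_table_length = my_override_len;
        return AE_OK;
}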
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 09ca39e14337..0a706cac37de 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -118,6 +118,7 @@ acpi_tb_check_xsdt(acpi_physical_address address)
return AE_OK;
}
+#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
* FUNCTION: acpi_tb_initialize_facs
@@ -148,6 +149,7 @@ acpi_status acpi_tb_initialize_facs(void)
&acpi_gbl_FACS));
return status;
}
+#endif /* !ACPI_REDUCED_HARDWARE */
/*******************************************************************************
*
@@ -444,7 +446,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
* RETURN: None
*
* DESCRIPTION: Install an ACPI table into the global data structure. The
- * table override mechanism is implemented here to allow the host
+ * table override mechanism is called to allow the host
* OS to replace any table before it is installed in the root
* table array.
*
@@ -454,11 +456,9 @@ void
acpi_tb_install_table(acpi_physical_address address,
char *signature, u32 table_index)
{
- u8 flags;
- acpi_status status;
- struct acpi_table_header *table_to_install;
- struct acpi_table_header *mapped_table;
- struct acpi_table_header *override_table = NULL;
+ struct acpi_table_header *table;
+ struct acpi_table_header *final_table;
+ struct acpi_table_desc *table_desc;
if (!address) {
ACPI_ERROR((AE_INFO,
@@ -469,69 +469,78 @@ acpi_tb_install_table(acpi_physical_address address,
/* Map just the table header */
- mapped_table =
- acpi_os_map_memory(address, sizeof(struct acpi_table_header));
- if (!mapped_table) {
+ table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
+ if (!table) {
+ ACPI_ERROR((AE_INFO,
+ "Could not map memory for table [%s] at %p",
+ signature, ACPI_CAST_PTR(void, address)));
return;
}
/* If a particular signature is expected (DSDT/FACS), it must match */
- if (signature && !ACPI_COMPARE_NAME(mapped_table->signature, signature)) {
+ if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
ACPI_ERROR((AE_INFO,
"Invalid signature 0x%X for ACPI table, expected [%s]",
- *ACPI_CAST_PTR(u32, mapped_table->signature),
- signature));
+ *ACPI_CAST_PTR(u32, table->signature), signature));
goto unmap_and_exit;
}
/*
+ * Initialize the table entry. Set the pointer to NULL, since the
+ * table is not fully mapped at this time.
+ */
+ table_desc = &acpi_gbl_root_table_list.tables[table_index];
+
+ table_desc->address = address;
+ table_desc->pointer = NULL;
+ table_desc->length = table->length;
+ table_desc->flags = ACPI_TABLE_ORIGIN_MAPPED;
+ ACPI_MOVE_32_TO_32(table_desc->signature.ascii, table->signature);
+
+ /*
* ACPI Table Override:
*
* Before we install the table, let the host OS override it with a new
* one if desired. Any table within the RSDT/XSDT can be replaced,
* including the DSDT which is pointed to by the FADT.
+ *
+ * NOTE: If the table is overridden, then final_table will contain a
+ * mapped pointer to the full new table. If the table is not overridden,
+ * or if there has been a physical override, then the table will be
+ * fully mapped later (in verify table). In any case, we must
+ * unmap the header that was mapped above.
*/
- status = acpi_os_table_override(mapped_table, &override_table);
- if (ACPI_SUCCESS(status) && override_table) {
- ACPI_INFO((AE_INFO,
- "%4.4s @ 0x%p Table override, replaced with:",
- mapped_table->signature, ACPI_CAST_PTR(void,
- address)));
-
- acpi_gbl_root_table_list.tables[table_index].pointer =
- override_table;
- address = ACPI_PTR_TO_PHYSADDR(override_table);
-
- table_to_install = override_table;
- flags = ACPI_TABLE_ORIGIN_OVERRIDE;
- } else {
- table_to_install = mapped_table;
- flags = ACPI_TABLE_ORIGIN_MAPPED;
+ final_table = acpi_tb_table_override(table, table_desc);
+ if (!final_table) {
+ final_table = table; /* There was no override */
}
- /* Initialize the table entry */
+ acpi_tb_print_table_header(table_desc->address, final_table);
- acpi_gbl_root_table_list.tables[table_index].address = address;
- acpi_gbl_root_table_list.tables[table_index].length =
- table_to_install->length;
- acpi_gbl_root_table_list.tables[table_index].flags = flags;
-
- ACPI_MOVE_32_TO_32(&
- (acpi_gbl_root_table_list.tables[table_index].
- signature), table_to_install->signature);
-
- acpi_tb_print_table_header(address, table_to_install);
+ /* Set the global integer width (based upon revision of the DSDT) */
if (table_index == ACPI_TABLE_INDEX_DSDT) {
+ acpi_ut_set_integer_width(final_table->revision);
+ }
- /* Global integer width is based upon revision of the DSDT */
-
- acpi_ut_set_integer_width(table_to_install->revision);
+ /*
+ * If we have a physical override during this early loading of the ACPI
+ * tables, unmap the table for now. It will be mapped again later when
+ * it is actually used. This supports very early loading of ACPI tables,
+ * before virtual memory is fully initialized and running within the
+ * host OS. Note: A logical override has the ACPI_TABLE_ORIGIN_OVERRIDE
+ * flag set and will not be deleted below.
+ */
+ if (final_table != table) {
+ acpi_tb_delete_table(table_desc);
}
unmap_and_exit:
- acpi_os_unmap_memory(mapped_table, sizeof(struct acpi_table_header));
+
+ /* Always unmap the table header that we mapped above */
+
+ acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index d42ede5260c7..684849949bf3 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -497,19 +497,20 @@ char *acpi_ut_get_mutex_name(u32 mutex_id)
/* Names for Notify() values, used for debug output */
-static const char *acpi_gbl_notify_value_names[] = {
- "Bus Check",
- "Device Check",
- "Device Wake",
- "Eject Request",
- "Device Check Light",
- "Frequency Mismatch",
- "Bus Mode Mismatch",
- "Power Fault",
- "Capabilities Check",
- "Device PLD Check",
- "Reserved",
- "System Locality Update"
+static const char *acpi_gbl_notify_value_names[ACPI_NOTIFY_MAX + 1] = {
+ /* 00 */ "Bus Check",
+ /* 01 */ "Device Check",
+ /* 02 */ "Device Wake",
+ /* 03 */ "Eject Request",
+ /* 04 */ "Device Check Light",
+ /* 05 */ "Frequency Mismatch",
+ /* 06 */ "Bus Mode Mismatch",
+ /* 07 */ "Power Fault",
+ /* 08 */ "Capabilities Check",
+ /* 09 */ "Device PLD Check",
+ /* 10 */ "Reserved",
+ /* 11 */ "System Locality Update",
+ /* 12 */ "Shutdown Request"
};
const char *acpi_ut_get_notify_name(u32 notify_value)
@@ -519,9 +520,10 @@ const char *acpi_ut_get_notify_name(u32 notify_value)
return (acpi_gbl_notify_value_names[notify_value]);
} else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
return ("Reserved");
- } else { /* Greater or equal to 0x80 */
-
- return ("**Device Specific**");
+ } else if (notify_value <= ACPI_MAX_DEVICE_SPECIFIC_NOTIFY) {
+ return ("Device Specific");
+ } else {
+ return ("Hardware Specific");
}
}
#endif
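Notify values now fall into four ranges instead of three. A stand-alone sketch with the boundaries assumed as 0x0C, 0x7F and 0xBF for illustration; the driver relies on ACPI_NOTIFY_MAX, ACPI_MAX_SYS_NOTIFY and ACPI_MAX_DEVICE_SPECIFIC_NOTIFY rather than literals:

#include <stdio.h>

static const char *notify_class(unsigned int value)
{
        if (value <= 0x0C)
                return "Standard";              /* named in the table above */
        if (value <= 0x7F)
                return "Reserved";
        if (value <= 0xBF)
                return "Device Specific";
        return "Hardware Specific";             /* 0xC0 - 0xFF */
}

int main(void)
{
        printf("0x02 -> %s\n", notify_class(0x02));
        printf("0x81 -> %s\n", notify_class(0x81));
        printf("0xC1 -> %s\n", notify_class(0xC1));
        return 0;
}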
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 4153584cf526..90f53b42eca9 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -140,6 +140,7 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
{NULL, ACPI_TYPE_ANY, NULL}
};
+#if (!ACPI_REDUCED_HARDWARE)
/******************************************************************************
*
* Event and Hardware globals
@@ -236,6 +237,7 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
ACPI_BITMASK_RT_CLOCK_STATUS,
ACPI_BITMASK_RT_CLOCK_ENABLE},
};
+#endif /* !ACPI_REDUCED_HARDWARE */
/*******************************************************************************
*
@@ -286,6 +288,8 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
+#if (!ACPI_REDUCED_HARDWARE)
+
/* GPE support */
acpi_gbl_gpe_xrupt_list_head = NULL;
@@ -294,6 +298,10 @@ acpi_status acpi_ut_init_globals(void)
acpi_current_gpe_count = 0;
acpi_gbl_all_gpes_initialized = FALSE;
+ acpi_gbl_global_event_handler = NULL;
+
+#endif /* !ACPI_REDUCED_HARDWARE */
+
/* Global handlers */
acpi_gbl_system_notify.handler = NULL;
@@ -302,7 +310,6 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_init_handler = NULL;
acpi_gbl_table_handler = NULL;
acpi_gbl_interface_handler = NULL;
- acpi_gbl_global_event_handler = NULL;
/* Global Lock support */
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 8359c0c5dc98..246798e4c938 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -53,27 +53,35 @@ ACPI_MODULE_NAME("utinit")
/* Local prototypes */
static void acpi_ut_terminate(void);
+#if (!ACPI_REDUCED_HARDWARE)
+
+static void acpi_ut_free_gpe_lists(void);
+
+#else
+
+#define acpi_ut_free_gpe_lists()
+#endif /* !ACPI_REDUCED_HARDWARE */
+
+#if (!ACPI_REDUCED_HARDWARE)
/******************************************************************************
*
- * FUNCTION: acpi_ut_terminate
+ * FUNCTION: acpi_ut_free_gpe_lists
*
* PARAMETERS: none
*
* RETURN: none
*
- * DESCRIPTION: Free global memory
+ * DESCRIPTION: Free global GPE lists
*
******************************************************************************/
-static void acpi_ut_terminate(void)
+static void acpi_ut_free_gpe_lists(void)
{
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_block_info *next_gpe_block;
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
struct acpi_gpe_xrupt_info *next_gpe_xrupt_info;
- ACPI_FUNCTION_TRACE(ut_terminate);
-
/* Free global GPE blocks and related info structures */
gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
@@ -91,7 +99,26 @@ static void acpi_ut_terminate(void)
ACPI_FREE(gpe_xrupt_info);
gpe_xrupt_info = next_gpe_xrupt_info;
}
+}
+#endif /* !ACPI_REDUCED_HARDWARE */
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ut_terminate
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: none
+ *
+ * DESCRIPTION: Free global memory
+ *
+ ******************************************************************************/
+
+static void acpi_ut_terminate(void)
+{
+ ACPI_FUNCTION_TRACE(ut_terminate);
+ acpi_ut_free_gpe_lists();
acpi_ut_delete_address_lists();
return_VOID;
}
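The hunk above relies on a small pattern worth spelling out: when the feature is compiled out, the helper's name is redefined as an empty function-like macro, so the call site in acpi_ut_terminate() stays identical in both builds. A generic, self-contained sketch with placeholder names:

#include <stdio.h>

#define REDUCED_HARDWARE 0      /* flip to 1 to build the stubbed variant */

#if (!REDUCED_HARDWARE)
static void free_gpe_lists(void)
{
        printf("freeing GPE lists\n");
}
#else
#define free_gpe_lists()        /* expands to nothing */
#endif

static void terminate(void)
{
        free_gpe_lists();       /* identical call site in both builds */
        printf("freeing address lists\n");
}

int main(void)
{
        terminate();
        return 0;
}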
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 644e8c8ebc4b..afa94f51ff0b 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -145,6 +145,8 @@ acpi_status acpi_enable_subsystem(u32 flags)
ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
+#if (!ACPI_REDUCED_HARDWARE)
+
/* Enable ACPI mode */
if (!(flags & ACPI_NO_ACPI_ENABLE)) {
@@ -169,6 +171,7 @@ acpi_status acpi_enable_subsystem(u32 flags)
ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
return_ACPI_STATUS(status);
}
+#endif /* !ACPI_REDUCED_HARDWARE */
/*
* Install the default op_region handlers. These are installed unless
@@ -184,7 +187,7 @@ acpi_status acpi_enable_subsystem(u32 flags)
return_ACPI_STATUS(status);
}
}
-
+#if (!ACPI_REDUCED_HARDWARE)
/*
* Initialize ACPI Event handling (Fixed and General Purpose)
*
@@ -220,6 +223,7 @@ acpi_status acpi_enable_subsystem(u32 flags)
return_ACPI_STATUS(status);
}
}
+#endif /* !ACPI_REDUCED_HARDWARE */
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index e5d53b7ddc7e..5577762daee1 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -558,33 +558,48 @@ void apei_resources_release(struct apei_resources *resources)
}
EXPORT_SYMBOL_GPL(apei_resources_release);
-static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
+static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
+ u32 *access_bit_width)
{
- u32 width, space_id;
+ u32 bit_width, bit_offset, access_size_code, space_id;
- width = reg->bit_width;
+ bit_width = reg->bit_width;
+ bit_offset = reg->bit_offset;
+ access_size_code = reg->access_width;
space_id = reg->space_id;
/* Handle possible alignment issues */
memcpy(paddr, &reg->address, sizeof(*paddr));
if (!*paddr) {
pr_warning(FW_BUG APEI_PFX
- "Invalid physical address in GAR [0x%llx/%u/%u]\n",
- *paddr, width, space_id);
+ "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
+ *paddr, bit_width, bit_offset, access_size_code,
+ space_id);
return -EINVAL;
}
- if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ if (access_size_code < 1 || access_size_code > 4) {
pr_warning(FW_BUG APEI_PFX
- "Invalid bit width in GAR [0x%llx/%u/%u]\n",
- *paddr, width, space_id);
+ "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
+ *paddr, bit_width, bit_offset, access_size_code,
+ space_id);
+ return -EINVAL;
+ }
+ *access_bit_width = 1UL << (access_size_code + 2);
+
+ if ((bit_width + bit_offset) > *access_bit_width) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
+ *paddr, bit_width, bit_offset, access_size_code,
+ space_id);
return -EINVAL;
}
if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
pr_warning(FW_BUG APEI_PFX
- "Invalid address space type in GAR [0x%llx/%u/%u]\n",
- *paddr, width, space_id);
+ "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
+ *paddr, bit_width, bit_offset, access_size_code,
+ space_id);
return -EINVAL;
}
@@ -595,23 +610,25 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
int rc;
+ u32 access_bit_width;
u64 address;
acpi_status status;
- rc = apei_check_gar(reg, &address);
+ rc = apei_check_gar(reg, &address, &access_bit_width);
if (rc)
return rc;
*val = 0;
switch(reg->space_id) {
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- status = acpi_os_read_memory64((acpi_physical_address)
- address, val, reg->bit_width);
+ status = acpi_os_read_memory((acpi_physical_address) address,
+ val, access_bit_width);
if (ACPI_FAILURE(status))
return -EIO;
break;
case ACPI_ADR_SPACE_SYSTEM_IO:
- status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
+ status = acpi_os_read_port(address, (u32 *)val,
+ access_bit_width);
if (ACPI_FAILURE(status))
return -EIO;
break;
@@ -627,22 +644,23 @@ EXPORT_SYMBOL_GPL(apei_read);
int apei_write(u64 val, struct acpi_generic_address *reg)
{
int rc;
+ u32 access_bit_width;
u64 address;
acpi_status status;
- rc = apei_check_gar(reg, &address);
+ rc = apei_check_gar(reg, &address, &access_bit_width);
if (rc)
return rc;
switch (reg->space_id) {
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- status = acpi_os_write_memory64((acpi_physical_address)
- address, val, reg->bit_width);
+ status = acpi_os_write_memory((acpi_physical_address) address,
+ val, access_bit_width);
if (ACPI_FAILURE(status))
return -EIO;
break;
case ACPI_ADR_SPACE_SYSTEM_IO:
- status = acpi_os_write_port(address, val, reg->bit_width);
+ status = acpi_os_write_port(address, val, access_bit_width);
if (ACPI_FAILURE(status))
return -EIO;
break;
@@ -661,23 +679,24 @@ static int collect_res_callback(struct apei_exec_context *ctx,
struct apei_resources *resources = data;
struct acpi_generic_address *reg = &entry->register_region;
u8 ins = entry->instruction;
+ u32 access_bit_width;
u64 paddr;
int rc;
if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
return 0;
- rc = apei_check_gar(reg, &paddr);
+ rc = apei_check_gar(reg, &paddr, &access_bit_width);
if (rc)
return rc;
switch (reg->space_id) {
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
return apei_res_add(&resources->iomem, paddr,
- reg->bit_width / 8);
+ access_bit_width / 8);
case ACPI_ADR_SPACE_SYSTEM_IO:
return apei_res_add(&resources->ioport, paddr,
- reg->bit_width / 8);
+ access_bit_width / 8);
default:
return -EINVAL;
}
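A stand-alone illustration of the reworked GAR validation: the access size code selects the access width as 1 << (code + 2) bits (codes 1-4 map to 8/16/32/64), the register's bit_width + bit_offset must fit inside that width, and the resource length registered later is access_bit_width / 8 bytes:

#include <stdio.h>

static int check_gar(unsigned int bit_width, unsigned int bit_offset,
                     unsigned int access_size_code, unsigned int *access_bit_width)
{
        if (access_size_code < 1 || access_size_code > 4)
                return -1;                      /* invalid access size code */

        *access_bit_width = 1U << (access_size_code + 2);

        if (bit_width + bit_offset > *access_bit_width)
                return -1;                      /* register does not fit the access */

        return 0;
}

int main(void)
{
        unsigned int width;

        if (check_gar(32, 0, 3, &width) == 0)
                printf("32-bit register, %u-bit accesses, %u-byte resource\n",
                       width, width / 8);
        return 0;
}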
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index 5d4189464d63..e6defd86b424 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -362,6 +362,7 @@ void apei_estatus_print(const char *pfx,
gedata_len = gdata->error_data_length;
apei_estatus_print_section(pfx, gdata, sec_no);
data_len -= gedata_len + sizeof(*gdata);
+ gdata = (void *)(gdata + 1) + gedata_len;
sec_no++;
}
}
@@ -396,6 +397,7 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
if (gedata_len > data_len - sizeof(*gdata))
return -EINVAL;
data_len -= gedata_len + sizeof(*gdata);
+ gdata = (void *)(gdata + 1) + gedata_len;
}
if (data_len)
return -EINVAL;
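Both loops previously never advanced gdata, so only the first generic data section was inspected or printed. A minimal stand-alone model of the corrected walk, with the struct layout invented for the example (a fixed header immediately followed by error_data_length payload bytes):

#include <stdio.h>
#include <string.h>

struct gdata_header {
        unsigned int error_data_length;         /* payload bytes after the header */
};

static void walk_entries(const void *buf, unsigned int data_len)
{
        const struct gdata_header *gdata = buf;
        unsigned int sec_no = 0;

        while (data_len >= sizeof(*gdata)) {
                unsigned int gedata_len = gdata->error_data_length;

                if (gedata_len > data_len - sizeof(*gdata))
                        break;                  /* malformed entry */

                printf("section %u: %u payload bytes\n", sec_no, gedata_len);
                data_len -= gedata_len + sizeof(*gdata);
                /* next entry, i.e. (void *)(gdata + 1) + gedata_len in kernel code */
                gdata = (const struct gdata_header *)
                        ((const unsigned char *)(gdata + 1) + gedata_len);
                sec_no++;
        }
}

int main(void)
{
        unsigned char buf[2 * sizeof(struct gdata_header) + 12] = { 0 };
        struct gdata_header h = { .error_data_length = 8 };

        memcpy(buf, &h, sizeof(h));
        h.error_data_length = 4;
        memcpy(buf + sizeof(h) + 8, &h, sizeof(h));

        walk_entries(buf, sizeof(buf));
        return 0;
}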
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 4ca087dd5f4f..8e1793649ec0 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -74,6 +74,8 @@ struct vendor_error_type_extension {
u8 reserved[3];
};
+static u32 notrigger;
+
static u32 vendor_flags;
static struct debugfs_blob_wrapper vendor_blob;
static char vendor_dev[64];
@@ -238,7 +240,7 @@ static void *einj_get_parameter_address(void)
return v5param;
}
}
- if (paddrv4) {
+ if (param_extension && paddrv4) {
struct einj_parameter *v4param;
v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param));
@@ -496,9 +498,11 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
if (rc)
return rc;
trigger_paddr = apei_exec_ctx_get_output(&ctx);
- rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
- if (rc)
- return rc;
+ if (notrigger == 0) {
+ rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
+ if (rc)
+ return rc;
+ }
rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
return rc;
@@ -700,6 +704,11 @@ static int __init einj_init(void)
einj_debug_dir, &error_param2);
if (!fentry)
goto err_unmap;
+
+ fentry = debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &notrigger);
+ if (!fentry)
+ goto err_unmap;
}
if (vendor_dev[0]) {
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index eb9fab5b96e4..e4d9d24eb73d 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -917,7 +917,7 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
{
if ((erst_tab->header_length !=
(sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
- && (erst_tab->header_length != sizeof(struct acpi_table_einj)))
+ && (erst_tab->header_length != sizeof(struct acpi_table_erst)))
return -EINVAL;
if (erst_tab->header.length < sizeof(struct acpi_table_erst))
return -EINVAL;
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
new file mode 100644
index 000000000000..8cf6c46e99fb
--- /dev/null
+++ b/drivers/acpi/bgrt.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2012 Red Hat, Inc <mjg@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+
+static struct acpi_table_bgrt *bgrt_tab;
+static struct kobject *bgrt_kobj;
+
+struct bmp_header {
+ u16 id;
+ u32 size;
+} __attribute ((packed));
+
+static struct bmp_header bmp_header;
+
+static ssize_t show_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->version);
+}
+static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+
+static ssize_t show_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->status);
+}
+static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+
+static ssize_t show_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_type);
+}
+static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
+
+static ssize_t show_xoffset(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_x);
+}
+static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
+
+static ssize_t show_yoffset(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_y);
+}
+static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
+
+static ssize_t show_image(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+{
+ int size = attr->size;
+ void __iomem *image = attr->private;
+
+ if (off >= size) {
+ count = 0;
+ } else {
+ if (off + count > size)
+ count = size - off;
+
+ memcpy_fromio(buf, image+off, count);
+ }
+
+ return count;
+}
+
+static struct bin_attribute image_attr = {
+ .attr = {
+ .name = "image",
+ .mode = S_IRUGO,
+ },
+ .read = show_image,
+};
+
+static struct attribute *bgrt_attributes[] = {
+ &dev_attr_version.attr,
+ &dev_attr_status.attr,
+ &dev_attr_type.attr,
+ &dev_attr_xoffset.attr,
+ &dev_attr_yoffset.attr,
+ NULL,
+};
+
+static struct attribute_group bgrt_attribute_group = {
+ .attrs = bgrt_attributes,
+};
+
+static int __init bgrt_init(void)
+{
+ acpi_status status;
+ int ret;
+ void __iomem *bgrt;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ status = acpi_get_table("BGRT", 0,
+ (struct acpi_table_header **)&bgrt_tab);
+
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ sysfs_bin_attr_init(&image_attr);
+
+ bgrt = ioremap(bgrt_tab->image_address, sizeof(struct bmp_header));
+
+ if (!bgrt) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ memcpy_fromio(&bmp_header, bgrt, sizeof(bmp_header));
+ image_attr.size = bmp_header.size;
+ iounmap(bgrt);
+
+ image_attr.private = ioremap(bgrt_tab->image_address, image_attr.size);
+
+ if (!image_attr.private) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+
+ bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
+ if (!bgrt_kobj) {
+ ret = -EINVAL;
+ goto out_iounmap;
+ }
+
+ ret = sysfs_create_group(bgrt_kobj, &bgrt_attribute_group);
+ if (ret)
+ goto out_kobject;
+
+ ret = sysfs_create_bin_file(bgrt_kobj, &image_attr);
+ if (ret)
+ goto out_group;
+
+ return 0;
+
+out_group:
+ sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group);
+out_kobject:
+ kobject_put(bgrt_kobj);
+out_iounmap:
+ iounmap(image_attr.private);
+out_err:
+ return ret;
+}
+
+static void __exit bgrt_exit(void)
+{
+ iounmap(image_attr.private);
+ sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group);
+ sysfs_remove_bin_file(bgrt_kobj, &image_attr);
+}
+
+module_init(bgrt_init);
+module_exit(bgrt_exit);
+
+MODULE_AUTHOR("Matthew Garrett");
+MODULE_DESCRIPTION("BGRT boot graphic support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9ecec98bc76e..3263b68cdfa3 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1010,6 +1010,7 @@ static int __init acpi_bus_init(void)
}
struct kobject *acpi_kobj;
+EXPORT_SYMBOL_GPL(acpi_kobj);
static int __init acpi_init(void)
{
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e37615f310d7..7edaccce6640 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -822,10 +822,10 @@ static int acpi_ec_add(struct acpi_device *device)
first_ec = ec;
device->driver_data = ec;
- WARN(!request_region(ec->data_addr, 1, "EC data"),
- "Could not request EC data io port 0x%lx", ec->data_addr);
- WARN(!request_region(ec->command_addr, 1, "EC cmd"),
- "Could not request EC cmd io port 0x%lx", ec->command_addr);
+ ret = !!request_region(ec->data_addr, 1, "EC data");
+ WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
+ ret = !!request_region(ec->command_addr, 1, "EC cmd");
+ WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 7a2035fa8c71..266bc58ce0ce 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -95,8 +95,8 @@ static int suspend_nvs_register(unsigned long start, unsigned long size)
{
struct nvs_page *entry, *next;
- pr_info("PM: Registering ACPI NVS region at %lx (%ld bytes)\n",
- start, size);
+ pr_info("PM: Registering ACPI NVS region [mem %#010lx-%#010lx] (%ld bytes)\n",
+ start, start + size - 1, size);
while (size > 0) {
unsigned int nr_bytes;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 412a1e04a922..ba14fb93c929 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -77,6 +77,9 @@ EXPORT_SYMBOL(acpi_in_debugger);
extern char line_buf[80];
#endif /*ENABLE_DEBUGGER */
+static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
+ u32 pm1b_ctrl);
+
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
@@ -347,7 +350,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
unsigned long pfn;
pfn = pg_off >> PAGE_SHIFT;
- if (page_is_ram(pfn))
+ if (should_use_kmap(pfn))
kunmap(pfn_to_page(pfn));
else
iounmap(vaddr);
@@ -554,6 +557,15 @@ acpi_os_table_override(struct acpi_table_header * existing_table,
return AE_OK;
}
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+ acpi_physical_address * new_address,
+ u32 *new_table_length)
+{
+ return AE_SUPPORT;
+}
+
+
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
u32 handled;
@@ -595,7 +607,8 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
acpi_irq_handler = handler;
acpi_irq_context = context;
- if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
+ if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED, "acpi",
+ acpi_irq)) {
printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
@@ -699,49 +712,6 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
EXPORT_SYMBOL(acpi_os_write_port);
-acpi_status
-acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
-{
- void __iomem *virt_addr;
- unsigned int size = width / 8;
- bool unmap = false;
- u32 dummy;
-
- rcu_read_lock();
- virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
- if (!virt_addr) {
- rcu_read_unlock();
- virt_addr = acpi_os_ioremap(phys_addr, size);
- if (!virt_addr)
- return AE_BAD_ADDRESS;
- unmap = true;
- }
-
- if (!value)
- value = &dummy;
-
- switch (width) {
- case 8:
- *(u8 *) value = readb(virt_addr);
- break;
- case 16:
- *(u16 *) value = readw(virt_addr);
- break;
- case 32:
- *(u32 *) value = readl(virt_addr);
- break;
- default:
- BUG();
- }
-
- if (unmap)
- iounmap(virt_addr);
- else
- rcu_read_unlock();
-
- return AE_OK;
-}
-
#ifdef readq
static inline u64 read64(const volatile void __iomem *addr)
{
@@ -758,7 +728,7 @@ static inline u64 read64(const volatile void __iomem *addr)
#endif
acpi_status
-acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width)
+acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
void __iomem *virt_addr;
unsigned int size = width / 8;
@@ -803,45 +773,6 @@ acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width)
return AE_OK;
}
-acpi_status
-acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
-{
- void __iomem *virt_addr;
- unsigned int size = width / 8;
- bool unmap = false;
-
- rcu_read_lock();
- virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
- if (!virt_addr) {
- rcu_read_unlock();
- virt_addr = acpi_os_ioremap(phys_addr, size);
- if (!virt_addr)
- return AE_BAD_ADDRESS;
- unmap = true;
- }
-
- switch (width) {
- case 8:
- writeb(value, virt_addr);
- break;
- case 16:
- writew(value, virt_addr);
- break;
- case 32:
- writel(value, virt_addr);
- break;
- default:
- BUG();
- }
-
- if (unmap)
- iounmap(virt_addr);
- else
- rcu_read_unlock();
-
- return AE_OK;
-}
-
#ifdef writeq
static inline void write64(u64 val, volatile void __iomem *addr)
{
@@ -856,7 +787,7 @@ static inline void write64(u64 val, volatile void __iomem *addr)
#endif
acpi_status
-acpi_os_write_memory64(acpi_physical_address phys_addr, u64 value, u32 width)
+acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
void __iomem *virt_addr;
unsigned int size = width / 8;
@@ -1641,3 +1572,24 @@ acpi_status acpi_os_terminate(void)
return AE_OK;
}
+
+acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
+ u32 pm1b_control)
+{
+ int rc = 0;
+ if (__acpi_os_prepare_sleep)
+ rc = __acpi_os_prepare_sleep(sleep_state,
+ pm1a_control, pm1b_control);
+ if (rc < 0)
+ return AE_ERROR;
+ else if (rc > 0)
+ return AE_CTRL_SKIP;
+
+ return AE_OK;
+}
+
+void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
+ u32 pm1a_ctrl, u32 pm1b_ctrl))
+{
+ __acpi_os_prepare_sleep = func;
+}
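
For reference, a minimal sketch of how a platform component might use the new acpi_os_set_prepare_sleep() hook; the callback and init function names here are hypothetical and not part of this patch. A negative return from the callback maps to AE_ERROR, a positive one to AE_CTRL_SKIP (the PM1x control write is skipped), and zero to AE_OK:

#include <linux/acpi.h>
#include <linux/init.h>

/* Hypothetical hook: let firmware handle S3 itself and skip the PM1x write. */
static int example_prepare_sleep(u8 sleep_state, u32 pm1a_ctrl, u32 pm1b_ctrl)
{
	if (sleep_state == ACPI_STATE_S3)
		return 1;	/* acpi_os_prepare_sleep() returns AE_CTRL_SKIP */
	return 0;		/* proceed with the normal register write */
}

static int __init example_sleep_init(void)
{
	acpi_os_set_prepare_sleep(example_prepare_sleep);
	return 0;
}
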
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 9ac2a9fa90ff..7049a7d27c4f 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -40,9 +40,11 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include "sleep.h"
+#include "internal.h"
#define PREFIX "ACPI: "
@@ -77,6 +79,20 @@ static struct acpi_driver acpi_power_driver = {
},
};
+/*
+ * A power managed device.
+ * A device may rely on multiple power resources.
+ */
+struct acpi_power_managed_device {
+ struct device *dev; /* The physical device */
+ acpi_handle *handle;
+};
+
+struct acpi_power_resource_device {
+ struct acpi_power_managed_device *device;
+ struct acpi_power_resource_device *next;
+};
+
struct acpi_power_resource {
struct acpi_device * device;
acpi_bus_id name;
@@ -84,6 +100,9 @@ struct acpi_power_resource {
u32 order;
unsigned int ref_count;
struct mutex resource_lock;
+
+ /* List of devices relying on this power resource */
+ struct acpi_power_resource_device *devices;
};
static struct list_head acpi_power_resource_list;
@@ -183,8 +202,26 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
return 0;
}
+/* Resume the device when all power resources in _PR0 are on */
+static void acpi_power_on_device(struct acpi_power_managed_device *device)
+{
+ struct acpi_device *acpi_dev;
+ acpi_handle handle = device->handle;
+ int state;
+
+ if (acpi_bus_get_device(handle, &acpi_dev))
+ return;
+
+ if (acpi_power_get_inferred_state(acpi_dev, &state))
+ return;
+
+ if (state == ACPI_STATE_D0 && pm_runtime_suspended(device->dev))
+ pm_request_resume(device->dev);
+}
+
static int __acpi_power_on(struct acpi_power_resource *resource)
{
+ struct acpi_power_resource_device *device_list = resource->devices;
acpi_status status = AE_OK;
status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
@@ -197,6 +234,12 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
resource->name));
+ while (device_list) {
+ acpi_power_on_device(device_list->device);
+
+ device_list = device_list->next;
+ }
+
return 0;
}
@@ -299,6 +342,125 @@ static int acpi_power_on_list(struct acpi_handle_list *list)
return result;
}
+static void __acpi_power_resource_unregister_device(struct device *dev,
+ acpi_handle res_handle)
+{
+ struct acpi_power_resource *resource = NULL;
+ struct acpi_power_resource_device *prev, *curr;
+
+ if (acpi_power_get_context(res_handle, &resource))
+ return;
+
+ mutex_lock(&resource->resource_lock);
+ prev = NULL;
+ curr = resource->devices;
+ while (curr) {
+ if (curr->device->dev == dev) {
+ if (!prev)
+ resource->devices = curr->next;
+ else
+ prev->next = curr->next;
+
+ kfree(curr);
+ break;
+ }
+
+ prev = curr;
+ curr = curr->next;
+ }
+ mutex_unlock(&resource->resource_lock);
+}
+
+/* Unlink dev from all power resources in _PR0 */
+void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handle)
+{
+ struct acpi_device *acpi_dev;
+ struct acpi_handle_list *list;
+ int i;
+
+ if (!dev || !handle)
+ return;
+
+ if (acpi_bus_get_device(handle, &acpi_dev))
+ return;
+
+ list = &acpi_dev->power.states[ACPI_STATE_D0].resources;
+
+ for (i = 0; i < list->count; i++)
+ __acpi_power_resource_unregister_device(dev,
+ list->handles[i]);
+}
+
+static int __acpi_power_resource_register_device(
+ struct acpi_power_managed_device *powered_device, acpi_handle handle)
+{
+ struct acpi_power_resource *resource = NULL;
+ struct acpi_power_resource_device *power_resource_device;
+ int result;
+
+ result = acpi_power_get_context(handle, &resource);
+ if (result)
+ return result;
+
+ power_resource_device = kzalloc(
+ sizeof(*power_resource_device), GFP_KERNEL);
+ if (!power_resource_device)
+ return -ENOMEM;
+
+ power_resource_device->device = powered_device;
+
+ mutex_lock(&resource->resource_lock);
+ power_resource_device->next = resource->devices;
+ resource->devices = power_resource_device;
+ mutex_unlock(&resource->resource_lock);
+
+ return 0;
+}
+
+/* Link dev to all power resources in _PR0 */
+int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
+{
+ struct acpi_device *acpi_dev;
+ struct acpi_handle_list *list;
+ struct acpi_power_managed_device *powered_device;
+ int i, ret;
+
+ if (!dev || !handle)
+ return -ENODEV;
+
+ ret = acpi_bus_get_device(handle, &acpi_dev);
+ if (ret)
+ goto no_power_resource;
+
+ if (!acpi_dev->power.flags.power_resources)
+ goto no_power_resource;
+
+ powered_device = kzalloc(sizeof(*powered_device), GFP_KERNEL);
+ if (!powered_device)
+ return -ENOMEM;
+
+ powered_device->dev = dev;
+ powered_device->handle = handle;
+
+ list = &acpi_dev->power.states[ACPI_STATE_D0].resources;
+
+ for (i = 0; i < list->count; i++) {
+ ret = __acpi_power_resource_register_device(powered_device,
+ list->handles[i]);
+
+ if (ret) {
+ acpi_power_resource_unregister_device(dev, handle);
+ break;
+ }
+ }
+
+ return ret;
+
+no_power_resource:
+ printk(KERN_WARNING PREFIX "Invalid Power Resource to register!");
+ return -ENODEV;
+}
+
/**
* acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
* ACPI 3.0) _PSW (Power State Wake)
@@ -500,14 +662,14 @@ int acpi_power_transition(struct acpi_device *device, int state)
{
int result;
- if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+ if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
if (device->power.state == state)
return 0;
if ((device->power.state < ACPI_STATE_D0)
- || (device->power.state > ACPI_STATE_D3))
+ || (device->power.state > ACPI_STATE_D3_COLD))
return -ENODEV;
/* TBD: Resources must be ordered. */
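
A minimal sketch of how a driver could use the new registration helpers so that its device is runtime-resumed when the _PR0 power resources come back on; the probe/remove functions and the adev pointer are hypothetical, and the sketch assumes the two helpers are exposed through the ACPI headers. Only the acpi_power_resource_register_device()/unregister_device() calls come from this patch:

#include <linux/device.h>
#include <acpi/acpi_bus.h>

/* Hypothetical driver hooks; adev is the ACPI companion of dev. */
static int example_probe(struct device *dev, struct acpi_device *adev)
{
	/* Link dev to every power resource listed in its _PR0 package. */
	return acpi_power_resource_register_device(dev, adev->handle);
}

static void example_remove(struct device *dev, struct acpi_device *adev)
{
	acpi_power_resource_unregister_device(dev, adev->handle);
}
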
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index d4d9cb7e016a..0734086537b8 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -67,6 +67,7 @@
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER 0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
+#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007"
#define ACPI_PROCESSOR_LIMIT_USER 0
#define ACPI_PROCESSOR_LIMIT_THERMAL 1
@@ -87,7 +88,7 @@ static int acpi_processor_start(struct acpi_processor *pr);
static const struct acpi_device_id processor_device_ids[] = {
{ACPI_PROCESSOR_OBJECT_HID, 0},
- {"ACPI0007", 0},
+ {ACPI_PROCESSOR_DEVICE_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
@@ -535,8 +536,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
return -ENOMEM;
if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
- kfree(pr);
- return -ENOMEM;
+ result = -ENOMEM;
+ goto err_free_pr;
}
pr->handle = device->handle;
@@ -576,7 +577,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
dev = get_cpu_device(pr->id);
if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
result = -EFAULT;
- goto err_free_cpumask;
+ goto err_clear_processor;
}
/*
@@ -594,9 +595,15 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
err_remove_sysfs:
sysfs_remove_link(&device->dev.kobj, "sysdev");
+err_clear_processor:
+ /*
+ * processor_device_array is not cleared to allow checks for buggy BIOS
+ */
+ per_cpu(processors, pr->id) = NULL;
err_free_cpumask:
free_cpumask_var(pr->throttling.shared_cpu_map);
-
+err_free_pr:
+ kfree(pr);
return result;
}
@@ -741,20 +748,46 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
return;
}
+static acpi_status is_processor_device(acpi_handle handle)
+{
+ struct acpi_device_info *info;
+ char *hid;
+ acpi_status status;
+
+ status = acpi_get_object_info(handle, &info);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ if (info->type == ACPI_TYPE_PROCESSOR) {
+ kfree(info);
+ return AE_OK; /* found a processor object */
+ }
+
+ if (!(info->valid & ACPI_VALID_HID)) {
+ kfree(info);
+ return AE_ERROR;
+ }
+
+ hid = info->hardware_id.string;
+ if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
+ kfree(info);
+ return AE_ERROR;
+ }
+
+ kfree(info);
+ return AE_OK; /* found a processor device object */
+}
+
static acpi_status
processor_walk_namespace_cb(acpi_handle handle,
u32 lvl, void *context, void **rv)
{
acpi_status status;
int *action = context;
- acpi_object_type type = 0;
- status = acpi_get_type(handle, &type);
+ status = is_processor_device(handle);
if (ACPI_FAILURE(status))
- return (AE_OK);
-
- if (type != ACPI_TYPE_PROCESSOR)
- return (AE_OK);
+ return AE_OK; /* not a processor; continue to walk */
switch (*action) {
case INSTALL_NOTIFY_HANDLER:
@@ -772,7 +805,8 @@ processor_walk_namespace_cb(acpi_handle handle,
break;
}
- return (AE_OK);
+ /* found a processor; skip walking underneath */
+ return AE_CTRL_DEPTH;
}
static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
@@ -830,7 +864,7 @@ void acpi_processor_install_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
int action = INSTALL_NOTIFY_HANDLER;
- acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
+ acpi_walk_namespace(ACPI_TYPE_ANY,
ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
processor_walk_namespace_cb, NULL, &action, NULL);
@@ -843,7 +877,7 @@ void acpi_processor_uninstall_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
int action = UNINSTALL_NOTIFY_HANDLER;
- acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
+ acpi_walk_namespace(ACPI_TYPE_ANY,
ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
processor_walk_namespace_cb, NULL, &action, NULL);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0e8e2de2ed3e..b3447f63e46b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -770,6 +770,35 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
return index;
}
+
+/**
+ * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
+ * @dev: the target CPU
+ * @index: the index of suggested state
+ */
+static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
+{
+ struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+ struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
+
+ ACPI_FLUSH_CPU_CACHE();
+
+ while (1) {
+
+ if (cx->entry_method == ACPI_CSTATE_HALT)
+ halt();
+ else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
+ inb(cx->address);
+ /* See comment in acpi_idle_do_entry() */
+ inl(acpi_gbl_FADT.xpm_timer_block.address);
+ } else
+ return -ENODEV;
+ }
+
+ /* Never reached */
+ return 0;
+}
+
/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU
@@ -1077,12 +1106,14 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = acpi_idle_enter_c1;
+ state->enter_dead = acpi_idle_play_dead;
drv->safe_state_index = count;
break;
case ACPI_STATE_C2:
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = acpi_idle_enter_simple;
+ state->enter_dead = acpi_idle_play_dead;
drv->safe_state_index = count;
break;
@@ -1159,8 +1190,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
* to make the code that updates C-States be called once.
*/
- if (smp_processor_id() == 0 &&
- cpuidle_get_driver() == &acpi_idle_driver) {
+ if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
cpuidle_pause_and_lock();
/* Protect against cpu-hotplug */
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 3b599abf2b40..641b5450a0db 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -57,6 +57,27 @@ ACPI_MODULE_NAME("processor_thermal");
static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init = 0;
+#define reduction_pctg(cpu) \
+ per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
+
+/*
+ * Emulate "per package data" using per cpu data (which should really be
+ * provided elsewhere)
+ *
+ * Note that we can lose a CPU on CPU hot-unplug; in that case we forget the
+ * state temporarily. Fortunately that's not a big issue here (I hope).
+ */
+static int phys_package_first_cpu(int cpu)
+{
+ int i;
+ int id = topology_physical_package_id(cpu);
+
+ for_each_online_cpu(i)
+ if (topology_physical_package_id(i) == id)
+ return i;
+ return 0;
+}
+
static int cpu_has_cpufreq(unsigned int cpu)
{
struct cpufreq_policy policy;
@@ -76,7 +97,7 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
max_freq = (
policy->cpuinfo.max_freq *
- (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
+ (100 - reduction_pctg(policy->cpu) * 20)
) / 100;
cpufreq_verify_within_limits(policy, 0, max_freq);
@@ -102,16 +123,28 @@ static int cpufreq_get_cur_state(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return 0;
- return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
+ return reduction_pctg(cpu);
}
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
+ int i;
+
if (!cpu_has_cpufreq(cpu))
return 0;
- per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
- cpufreq_update_policy(cpu);
+ reduction_pctg(cpu) = state;
+
+ /*
+ * Update all the CPUs in the same package because they all
+ * contribute to the temperature and often share the same
+ * frequency.
+ */
+ for_each_online_cpu(i) {
+ if (topology_physical_package_id(i) ==
+ topology_physical_package_id(cpu))
+ cpufreq_update_policy(i);
+ }
return 0;
}
@@ -119,10 +152,6 @@ void acpi_thermal_cpufreq_init(void)
{
int i;
- for (i = 0; i < nr_cpu_ids; i++)
- if (cpu_present(i))
- per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
-
i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
if (!i)
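
As a worked example of the limit computed in acpi_thermal_cpufreq_notifier(): with cooling state 2, reduction_pctg() evaluates to 2, so max_freq = cpuinfo.max_freq * (100 - 2 * 20) / 100, i.e. 60% of the maximum (1,200,000 kHz on a 2,000,000 kHz part). With this change, cpufreq_set_cur_state() applies that same cap to every online CPU in the physical package rather than only the CPU that received the request.
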
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 605a2954ef17..1d02b7b5ade0 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -769,7 +769,7 @@ static int acpi_read_throttling_status(struct acpi_processor *pr,
u64 *value)
{
u32 bit_width, bit_offset;
- u64 ptc_value;
+ u32 ptc_value;
u64 ptc_mask;
struct acpi_processor_throttling *throttling;
int ret = -1;
@@ -777,12 +777,11 @@ static int acpi_read_throttling_status(struct acpi_processor *pr,
throttling = &pr->throttling;
switch (throttling->status_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
- ptc_value = 0;
bit_width = throttling->status_register.bit_width;
bit_offset = throttling->status_register.bit_offset;
acpi_os_read_port((acpi_io_address) throttling->status_register.
- address, (u32 *) &ptc_value,
+ address, &ptc_value,
(u32) (bit_width + bit_offset));
ptc_mask = (1 << bit_width) - 1;
*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index a6c77e8b37bd..c1d612435939 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -23,8 +23,7 @@ void acpi_reboot(void)
/* Is the reset register supported? The spec says we should be
* checking the bit width and bit offset, but Windows ignores
* these fields */
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER))
- return;
+ /* Also ignore acpi_gbl_FADT.flags.ACPI_FADT_RESET_REGISTER */
reset_value = acpi_gbl_FADT.reset_value;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 8ab80bafe3f1..767e2dcb9616 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -880,18 +880,22 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
int j;
device->power.flags.power_resources = 1;
- ps->flags.valid = 1;
for (j = 0; j < ps->resources.count; j++)
acpi_bus_add_power_resource(ps->resources.handles[j]);
}
+ /* The existence of _PR3 indicates D3Cold support */
+ if (i == ACPI_STATE_D3) {
+ status = acpi_get_handle(device->handle, object_name, &handle);
+ if (ACPI_SUCCESS(status))
+ device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
+ }
+
/* Evaluate "_PSx" to see if we can do explicit sets */
object_name[2] = 'S';
status = acpi_get_handle(device->handle, object_name, &handle);
- if (ACPI_SUCCESS(status)) {
+ if (ACPI_SUCCESS(status))
ps->flags.explicit_set = 1;
- ps->flags.valid = 1;
- }
/* State is valid if we have some power control */
if (ps->resources.count || ps->flags.explicit_set)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index ca191ff97844..1d661b5c3287 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -17,6 +17,8 @@
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <asm/io.h>
@@ -26,6 +28,24 @@
#include "internal.h"
#include "sleep.h"
+static unsigned int gts, bfs;
+module_param(gts, uint, 0644);
+module_param(bfs, uint, 0644);
+MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
+MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
+
+static u8 wake_sleep_flags(void)
+{
+ u8 flags = ACPI_NO_OPTIONAL_METHODS;
+
+ if (gts)
+ flags |= ACPI_EXECUTE_GTS;
+ if (bfs)
+ flags |= ACPI_EXECUTE_BFS;
+
+ return flags;
+}
+
static u8 sleep_states[ACPI_S_STATE_COUNT];
static void acpi_sleep_tts_switch(u32 acpi_state)
@@ -243,6 +263,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
{
acpi_status status = AE_OK;
u32 acpi_state = acpi_target_sleep_state;
+ u8 flags = wake_sleep_flags();
int error;
ACPI_FLUSH_CPU_CACHE();
@@ -250,7 +271,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
switch (acpi_state) {
case ACPI_STATE_S1:
barrier();
- status = acpi_enter_sleep_state(acpi_state);
+ status = acpi_enter_sleep_state(acpi_state, flags);
break;
case ACPI_STATE_S3:
@@ -265,7 +286,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
/* Reprogram control registers and execute _BFS */
- acpi_leave_sleep_state_prep(acpi_state);
+ acpi_leave_sleep_state_prep(acpi_state, flags);
/* ACPI 3.0 specs (P62) says that it's the responsibility
* of the OSPM to clear the status bit [ implying that the
@@ -529,27 +550,30 @@ static int acpi_hibernation_begin(void)
static int acpi_hibernation_enter(void)
{
+ u8 flags = wake_sleep_flags();
acpi_status status = AE_OK;
ACPI_FLUSH_CPU_CACHE();
/* This shouldn't return. If it returns, we have a problem */
- status = acpi_enter_sleep_state(ACPI_STATE_S4);
+ status = acpi_enter_sleep_state(ACPI_STATE_S4, flags);
/* Reprogram control registers and execute _BFS */
- acpi_leave_sleep_state_prep(ACPI_STATE_S4);
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
static void acpi_hibernation_leave(void)
{
+ u8 flags = wake_sleep_flags();
+
/*
* If ACPI is not enabled by the BIOS and the boot kernel, we need to
* enable it here.
*/
acpi_enable();
/* Reprogram control registers and execute _BFS */
- acpi_leave_sleep_state_prep(ACPI_STATE_S4);
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
/* Check the hardware signature */
if (facs && s4_hardware_signature != facs->hardware_signature) {
printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -730,6 +754,40 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
#ifdef CONFIG_PM_SLEEP
/**
+ * acpi_pm_device_run_wake - Enable/disable wake-up for given device.
+ * @phys_dev: Device for which to enable/disable the platform's ability to
+ * wake up the system.
+ * @enable: Whether to enable or disable the wake-up functionality.
+ *
+ * Find the ACPI device object corresponding to @phys_dev and try to
+ * enable/disable the GPE associated with it.
+ */
+int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
+{
+ struct acpi_device *dev;
+ acpi_handle handle;
+
+ if (!device_run_wake(phys_dev))
+ return -EINVAL;
+
+ handle = DEVICE_ACPI_HANDLE(phys_dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
+ dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (enable) {
+ acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
+ acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
+ } else {
+ acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
+ acpi_disable_wakeup_device_power(dev);
+ }
+
+ return 0;
+}
+
+/**
* acpi_pm_device_sleep_wake - enable or disable the system wake-up
* capability of given device
* @dev: device to handle
@@ -770,10 +828,12 @@ static void acpi_power_off_prepare(void)
static void acpi_power_off(void)
{
+ u8 flags = wake_sleep_flags();
+
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
printk(KERN_DEBUG "%s called\n", __func__);
local_irq_disable();
- acpi_enter_sleep_state(ACPI_STATE_S5);
+ acpi_enter_sleep_state(ACPI_STATE_S5, flags);
}
/*
@@ -788,13 +848,13 @@ static void __init acpi_gts_bfs_check(void)
{
acpi_handle dummy;
- if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_NAME__GTS, &dummy)))
+ if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy)))
{
printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n");
printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, "
"please notify linux-acpi@vger.kernel.org\n");
}
- if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_NAME__BFS, &dummy)))
+ if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy)))
{
printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n");
printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, "
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 48fbc647b178..7dbebea1ec31 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -941,13 +941,13 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
if (!tz)
return -EINVAL;
- /* Get temperature [_TMP] (required) */
- result = acpi_thermal_get_temperature(tz);
+ /* Get trip points [_CRT, _PSV, etc.] (required) */
+ result = acpi_thermal_get_trip_points(tz);
if (result)
return result;
- /* Get trip points [_CRT, _PSV, etc.] (required) */
- result = acpi_thermal_get_trip_points(tz);
+ /* Get temperature [_TMP] (required) */
+ result = acpi_thermal_get_temperature(tz);
if (result)
return result;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index eaef02afc7cf..9577b6fa2650 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -548,27 +548,27 @@ acpi_video_device_EDID(struct acpi_video_device *device,
* 1. The system BIOS should NOT automatically control the brightness
* level of the LCD when the power changes from AC to DC.
* Return Value:
- * -1 wrong arg.
+ * -EINVAL wrong arg.
*/
static int
acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
{
- u64 status = 0;
+ acpi_status status;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
- if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1) {
- status = -1;
- goto Failed;
- }
+ if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
+ return -EINVAL;
arg0.integer.value = (lcd_flag << 2) | bios_flag;
video->dos_setting = arg0.integer.value;
- acpi_evaluate_object(video->device->handle, "_DOS", &args, NULL);
+ status = acpi_evaluate_object(video->device->handle, "_DOS",
+ &args, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
- Failed:
- return status;
+ return 0;
}
/*
@@ -1343,15 +1343,17 @@ static int
acpi_video_bus_get_devices(struct acpi_video_bus *video,
struct acpi_device *device)
{
- int status = 0;
+ int status;
struct acpi_device *dev;
- acpi_video_device_enumerate(video);
+ status = acpi_video_device_enumerate(video);
+ if (status)
+ return status;
list_for_each_entry(dev, &device->children, node) {
status = acpi_video_bus_get_one_device(dev, video);
- if (ACPI_FAILURE(status)) {
+ if (status) {
printk(KERN_WARNING PREFIX
"Can't attach device\n");
continue;
@@ -1653,15 +1655,20 @@ static int acpi_video_bus_add(struct acpi_device *device)
mutex_init(&video->device_list_lock);
INIT_LIST_HEAD(&video->video_device_list);
- acpi_video_bus_get_devices(video, device);
- acpi_video_bus_start_devices(video);
+ error = acpi_video_bus_get_devices(video, device);
+ if (error)
+ goto err_free_video;
video->input = input = input_allocate_device();
if (!input) {
error = -ENOMEM;
- goto err_stop_video;
+ goto err_put_video;
}
+ error = acpi_video_bus_start_devices(video);
+ if (error)
+ goto err_free_input_dev;
+
snprintf(video->phys, sizeof(video->phys),
"%s/video/input0", acpi_device_hid(video->device));
@@ -1682,7 +1689,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
error = input_register_device(input);
if (error)
- goto err_free_input_dev;
+ goto err_stop_video;
printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
@@ -1692,14 +1699,19 @@ static int acpi_video_bus_add(struct acpi_device *device)
video->pm_nb.notifier_call = acpi_video_resume;
video->pm_nb.priority = 0;
- register_pm_notifier(&video->pm_nb);
+ error = register_pm_notifier(&video->pm_nb);
+ if (error)
+ goto err_unregister_input_dev;
return 0;
- err_free_input_dev:
- input_free_device(input);
+ err_unregister_input_dev:
+ input_unregister_device(input);
err_stop_video:
acpi_video_bus_stop_devices(video);
+ err_free_input_dev:
+ input_free_device(input);
+ err_put_video:
acpi_video_bus_put_devices(video);
kfree(video->attached_array);
err_free_video:
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 0fbf1a776b52..a741e418b456 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -705,16 +705,13 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
{
unsigned int minor;
struct timeval par_timeout;
- struct compat_timeval __user *tc;
int ret;
minor = iminor(file->f_path.dentry->d_inode);
mutex_lock(&lp_mutex);
switch (cmd) {
case LPSETTIMEOUT:
- tc = compat_ptr(arg);
- if (get_user(par_timeout.tv_sec, &tc->tv_sec) ||
- get_user(par_timeout.tv_usec, &tc->tv_usec)) {
+ if (compat_get_timeval(&par_timeout, compat_ptr(arg))) {
ret = -EFAULT;
break;
}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 999d6a03e436..5138927a416c 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -26,7 +26,6 @@ config CLKSRC_DBX500_PRCMU
config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
bool "Clocksource PRCMU Timer sched_clock"
depends on (CLKSRC_DBX500_PRCMU && !NOMADIK_MTU_SCHED_CLOCK)
- select HAVE_SCHED_CLOCK
default y
help
Use the always on PRCMU Timer as sched_clock
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 6588f43017bd..87411cebc577 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -53,6 +53,52 @@ static void cpuidle_kick_cpus(void) {}
static int __cpuidle_register_device(struct cpuidle_device *dev);
+static inline int cpuidle_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ struct cpuidle_state *target_state = &drv->states[index];
+ return target_state->enter(dev, drv, index);
+}
+
+static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
+}
+
+typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index);
+
+static cpuidle_enter_t cpuidle_enter_ops;
+
+/**
+ * cpuidle_play_dead - cpu off-lining
+ *
+ * Only returns in case of an error
+ */
+int cpuidle_play_dead(void)
+{
+ struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+ struct cpuidle_driver *drv = cpuidle_get_driver();
+ int i, dead_state = -1;
+ int power_usage = -1;
+
+ /* Find lowest-power state that supports long-term idle */
+ for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+ struct cpuidle_state *s = &drv->states[i];
+
+ if (s->power_usage < power_usage && s->enter_dead) {
+ power_usage = s->power_usage;
+ dead_state = i;
+ }
+ }
+
+ if (dead_state != -1)
+ return drv->states[dead_state].enter_dead(dev, dead_state);
+
+ return -ENODEV;
+}
+
/**
* cpuidle_idle_call - the main idle loop
*
@@ -63,7 +109,6 @@ int cpuidle_idle_call(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv = cpuidle_get_driver();
- struct cpuidle_state *target_state;
int next_state, entered_state;
if (off)
@@ -92,12 +137,10 @@ int cpuidle_idle_call(void)
return 0;
}
- target_state = &drv->states[next_state];
-
trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
trace_cpu_idle_rcuidle(next_state, dev->cpu);
- entered_state = target_state->enter(dev, drv, next_state);
+ entered_state = cpuidle_enter_ops(dev, drv, next_state);
trace_power_end_rcuidle(dev->cpu);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
@@ -110,6 +153,8 @@ int cpuidle_idle_call(void)
dev->states_usage[entered_state].time +=
(unsigned long long)dev->last_residency;
dev->states_usage[entered_state].usage++;
+ } else {
+ dev->last_residency = 0;
}
/* give the governor an opportunity to reflect on the outcome */
@@ -164,6 +209,37 @@ void cpuidle_resume_and_unlock(void)
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
+/**
+ * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
+ * @dev: pointer to a valid cpuidle_device object
+ * @drv: pointer to a valid cpuidle_driver object
+ * @index: index of the target cpuidle state.
+ */
+int cpuidle_wrap_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index,
+ int (*enter)(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index))
+{
+ ktime_t time_start, time_end;
+ s64 diff;
+
+ time_start = ktime_get();
+
+ index = enter(dev, drv, index);
+
+ time_end = ktime_get();
+
+ local_irq_enable();
+
+ diff = ktime_to_us(ktime_sub(time_end, time_start));
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ dev->last_residency = (int) diff;
+
+ return index;
+}
+
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
@@ -197,6 +273,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
state->power_usage = -1;
state->flags = 0;
state->enter = poll_idle;
+ state->disable = 0;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
@@ -212,13 +289,14 @@ static void poll_idle_init(struct cpuidle_driver *drv) {}
int cpuidle_enable_device(struct cpuidle_device *dev)
{
int ret, i;
+ struct cpuidle_driver *drv = cpuidle_get_driver();
if (dev->enabled)
return 0;
- if (!cpuidle_get_driver() || !cpuidle_curr_governor)
+ if (!drv || !cpuidle_curr_governor)
return -EIO;
if (!dev->state_count)
- return -EINVAL;
+ dev->state_count = drv->state_count;
if (dev->registered == 0) {
ret = __cpuidle_register_device(dev);
@@ -226,13 +304,16 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
return ret;
}
- poll_idle_init(cpuidle_get_driver());
+ cpuidle_enter_ops = drv->en_core_tk_irqen ?
+ cpuidle_enter_tk : cpuidle_enter;
+
+ poll_idle_init(drv);
if ((ret = cpuidle_add_state_sysfs(dev)))
return ret;
if (cpuidle_curr_governor->enable &&
- (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
+ (ret = cpuidle_curr_governor->enable(drv, dev)))
goto fail_sysfs;
for (i = 0; i < dev->state_count; i++) {
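
A minimal sketch of a cpuidle driver that opts into the new core timekeeping path; the driver name, the example_enter() body and the cpu_do_idle() call are placeholders for a real platform's idle entry, while en_core_tk_irqen and the cpuidle_enter_tk() wrapping behaviour come from this series:

#include <linux/cpuidle.h>
#include <linux/module.h>

/* Placeholder idle entry; a real driver would enter a platform C-state. */
static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();	/* assumed platform low-power instruction */
	return index;	/* the core measures residency and re-enables irqs */
}

static struct cpuidle_driver example_idle_driver = {
	.name			= "example_idle",
	.owner			= THIS_MODULE,
	.en_core_tk_irqen	= 1,	/* let the core do timekeeping/irq enable */
	.states[0] = {
		.enter			= example_enter,
		.exit_latency		= 1,
		.target_residency	= 1,
		.flags			= CPUIDLE_FLAG_TIME_VALID,
		.name			= "WFI",
		.desc			= "wait for interrupt",
	},
	.state_count = 1,
};

Registering such a driver with cpuidle_register_driver() now succeeds only if state_count is non-zero, as the driver.c change that follows enforces.
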
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 284d7af5a9c8..40cd3f3024df 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -47,7 +47,7 @@ static void __cpuidle_register_driver(struct cpuidle_driver *drv)
*/
int cpuidle_register_driver(struct cpuidle_driver *drv)
{
- if (!drv)
+ if (!drv || !drv->state_count)
return -EINVAL;
if (cpuidle_disabled())
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index ad0952601ae2..06335756ea14 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -236,7 +236,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
- unsigned int power_usage = -1;
+ int power_usage = -1;
int i;
int multiplier;
struct timespec t;
@@ -280,7 +280,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* We want to default to C1 (hlt), not to busy polling
* unless the timer is happening really really soon.
*/
- if (data->expected_us > 5)
+ if (data->expected_us > 5 &&
+ drv->states[CPUIDLE_DRIVER_STATE_START].disable == 0)
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
/*
@@ -290,6 +291,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
+ if (s->disable)
+ continue;
if (s->target_residency > data->predicted_us)
continue;
if (s->exit_latency > latency_req)
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 3fe41fe4851a..88032b4dc6d2 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -11,6 +11,7 @@
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/capability.h>
#include "cpuidle.h"
@@ -222,6 +223,9 @@ struct cpuidle_state_attr {
#define define_one_state_ro(_name, show) \
static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
+#define define_one_state_rw(_name, show, store) \
+static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
+
#define define_show_state_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, char *buf) \
@@ -229,6 +233,24 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
return sprintf(buf, "%u\n", state->_name);\
}
+#define define_store_state_function(_name) \
+static ssize_t store_state_##_name(struct cpuidle_state *state, \
+ const char *buf, size_t size) \
+{ \
+ long value; \
+ int err; \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ err = kstrtol(buf, 0, &value); \
+ if (err) \
+ return err; \
+ if (value) \
+ state->disable = 1; \
+ else \
+ state->disable = 0; \
+ return size; \
+}
+
#define define_show_state_ull_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, char *buf) \
@@ -251,6 +273,8 @@ define_show_state_ull_function(usage)
define_show_state_ull_function(time)
define_show_state_str_function(name)
define_show_state_str_function(desc)
+define_show_state_function(disable)
+define_store_state_function(disable)
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
@@ -258,6 +282,7 @@ define_one_state_ro(latency, show_state_exit_latency);
define_one_state_ro(power, show_state_power_usage);
define_one_state_ro(usage, show_state_usage);
define_one_state_ro(time, show_state_time);
+define_one_state_rw(disable, show_state_disable, store_state_disable);
static struct attribute *cpuidle_state_default_attrs[] = {
&attr_name.attr,
@@ -266,6 +291,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
&attr_power.attr,
&attr_usage.attr,
&attr_time.attr,
+ &attr_disable.attr,
NULL
};
@@ -287,8 +313,22 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
return ret;
}
+static ssize_t cpuidle_state_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t size)
+{
+ int ret = -EIO;
+ struct cpuidle_state *state = kobj_to_state(kobj);
+ struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
+
+ if (cattr->store)
+ ret = cattr->store(state, buf, size);
+
+ return ret;
+}
+
static const struct sysfs_ops cpuidle_state_sysfs_ops = {
.show = cpuidle_state_show,
+ .store = cpuidle_state_store,
};
static void cpuidle_state_sysfs_release(struct kobject *kobj)
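
The net effect of the new attribute is a writable per-state "disable" file in each cpuidle state directory. A small userspace sketch, assuming the usual /sys/devices/system/cpu layout and CAP_SYS_ADMIN:

#include <stdio.h>

int main(void)
{
	/* Disable C-state index 2 on cpu0; any non-zero value sets state->disable. */
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpuidle/state2/disable", "w");

	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);
	return 0;
}

Once set, menu_select() skips that state, as the governors/menu.c hunk above shows.
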
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 4a6c46dea8a0..cf9da362d64f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -201,7 +201,6 @@ config PL330_DMA
tristate "DMA API Driver for PL330"
select DMA_ENGINE
depends on ARM_AMBA
- select PL330
help
Select if your platform has one or more PL330 DMACs.
You need to provide platform specific settings via
@@ -231,7 +230,7 @@ config IMX_SDMA
config IMX_DMA
tristate "i.MX DMA support"
- depends on IMX_HAVE_DMA_V1
+ depends on ARCH_MXC
select DMA_ENGINE
help
Support the i.MX DMA engine. This engine is integrated into
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 8a281584458b..c301a8ec31aa 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -85,6 +85,8 @@
#include <linux/slab.h>
#include <asm/hardware/pl080.h>
+#include "dmaengine.h"
+
#define DRIVER_NAME "pl08xdmac"
static struct amba_driver pl08x_amba_driver;
@@ -649,7 +651,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
}
if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
- (bd.srcbus.addr % bd.srcbus.buswidth)) {
+ (bd.dstbus.addr % bd.dstbus.buswidth)) {
dev_err(&pl08x->adev->dev,
"%s src & dst address must be aligned to src"
" & dst width if peripheral is flow controller",
@@ -919,13 +921,10 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
struct pl08x_txd *txd = to_pl08x_txd(tx);
unsigned long flags;
+ dma_cookie_t cookie;
spin_lock_irqsave(&plchan->lock, flags);
-
- plchan->chan.cookie += 1;
- if (plchan->chan.cookie < 0)
- plchan->chan.cookie = 1;
- tx->cookie = plchan->chan.cookie;
+ cookie = dma_cookie_assign(tx);
/* Put this onto the pending list */
list_add_tail(&txd->node, &plchan->pend_list);
@@ -945,7 +944,7 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
spin_unlock_irqrestore(&plchan->lock, flags);
- return tx->cookie;
+ return cookie;
}
static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -965,31 +964,17 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
enum dma_status ret;
- u32 bytesleft = 0;
- last_used = plchan->chan.cookie;
- last_complete = plchan->lc;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
- if (ret == DMA_SUCCESS) {
- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_SUCCESS)
return ret;
- }
/*
* This cookie not complete yet
+ * Get number of bytes left in the active transactions and queue
*/
- last_used = plchan->chan.cookie;
- last_complete = plchan->lc;
-
- /* Get number of bytes left in the active transactions and queue */
- bytesleft = pl08x_getbytes_chan(plchan);
-
- dma_set_tx_state(txstate, last_complete, last_used,
- bytesleft);
+ dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
if (plchan->state == PL08X_CHAN_PAUSED)
return DMA_PAUSED;
@@ -1139,6 +1124,8 @@ static int dma_set_runtime_config(struct dma_chan *chan,
cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
+ plchan->device_fc = config->device_fc;
+
if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
plchan->src_addr = config->src_addr;
plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
@@ -1326,7 +1313,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags)
+ unsigned long flags, void *context)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
@@ -1370,7 +1357,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
return NULL;
}
- if (plchan->cd->device_fc)
+ if (plchan->device_fc)
tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
PL080_FLOW_PER2MEM_PER;
else
@@ -1541,7 +1528,7 @@ static void pl08x_tasklet(unsigned long data)
if (txd) {
/* Update last completed */
- plchan->lc = txd->tx.cookie;
+ dma_cookie_complete(&txd->tx);
}
/* If a new descriptor is queued, set it up plchan->at is NULL here */
@@ -1722,8 +1709,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
chan->name);
chan->chan.device = dmadev;
- chan->chan.cookie = 0;
- chan->lc = 0;
+ dma_cookie_init(&chan->chan);
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pend_list);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index f4aed5fc2cb6..7aa58d204892 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -27,6 +27,7 @@
#include <linux/of_device.h>
#include "at_hdmac_regs.h"
+#include "dmaengine.h"
/*
* Glossary
@@ -192,27 +193,6 @@ static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
}
/**
- * atc_assign_cookie - compute and assign new cookie
- * @atchan: channel we work on
- * @desc: descriptor to assign cookie for
- *
- * Called with atchan->lock held and bh disabled
- */
-static dma_cookie_t
-atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
-{
- dma_cookie_t cookie = atchan->chan_common.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- atchan->chan_common.cookie = cookie;
- desc->txd.cookie = cookie;
-
- return cookie;
-}
-
-/**
* atc_dostart - starts the DMA engine for real
* @atchan: the channel we want to start
* @first: first descriptor in the list we want to begin with
@@ -269,7 +249,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
dev_vdbg(chan2dev(&atchan->chan_common),
"descriptor %u complete\n", txd->cookie);
- atchan->completed_cookie = txd->cookie;
+ dma_cookie_complete(txd);
/* move children to free_list */
list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -547,7 +527,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
unsigned long flags;
spin_lock_irqsave(&atchan->lock, flags);
- cookie = atc_assign_cookie(atchan, desc);
+ cookie = dma_cookie_assign(tx);
if (list_empty(&atchan->active_list)) {
dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
@@ -659,14 +639,16 @@ err_desc_get:
* @sg_len: number of entries in @scatterlist
* @direction: DMA direction
* @flags: tx descriptor status flags
+ * @context: transaction context (ignored)
*/
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags)
+ unsigned long flags, void *context)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave = chan->private;
+ struct dma_slave_config *sconfig = &atchan->dma_sconfig;
struct at_desc *first = NULL;
struct at_desc *prev = NULL;
u32 ctrla;
@@ -688,19 +670,18 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
- reg_width = atslave->reg_width;
-
ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
ctrlb = ATC_IEN;
switch (direction) {
case DMA_MEM_TO_DEV:
+ reg_width = convert_buswidth(sconfig->dst_addr_width);
ctrla |= ATC_DST_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
| ATC_FC_MEM2PER
| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
- reg = atslave->tx_reg;
+ reg = sconfig->dst_addr;
for_each_sg(sgl, sg, sg_len, i) {
struct at_desc *desc;
u32 len;
@@ -728,13 +709,14 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
break;
case DMA_DEV_TO_MEM:
+ reg_width = convert_buswidth(sconfig->src_addr_width);
ctrla |= ATC_SRC_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
| ATC_FC_PER2MEM
| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
- reg = atslave->rx_reg;
+ reg = sconfig->src_addr;
for_each_sg(sgl, sg, sg_len, i) {
struct at_desc *desc;
u32 len;
@@ -810,12 +792,15 @@ err_out:
* atc_dma_cyclic_fill_desc - Fill one period decriptor
*/
static int
-atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
+atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
unsigned int period_index, dma_addr_t buf_addr,
- size_t period_len, enum dma_transfer_direction direction)
+ unsigned int reg_width, size_t period_len,
+ enum dma_transfer_direction direction)
{
- u32 ctrla;
- unsigned int reg_width = atslave->reg_width;
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma_slave *atslave = chan->private;
+ struct dma_slave_config *sconfig = &atchan->dma_sconfig;
+ u32 ctrla;
/* prepare common CRTLA value */
ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
@@ -826,7 +811,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
switch (direction) {
case DMA_MEM_TO_DEV:
desc->lli.saddr = buf_addr + (period_len * period_index);
- desc->lli.daddr = atslave->tx_reg;
+ desc->lli.daddr = sconfig->dst_addr;
desc->lli.ctrla = ctrla;
desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
@@ -836,7 +821,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
break;
case DMA_DEV_TO_MEM:
- desc->lli.saddr = atslave->rx_reg;
+ desc->lli.saddr = sconfig->src_addr;
desc->lli.daddr = buf_addr + (period_len * period_index);
desc->lli.ctrla = ctrla;
desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
@@ -860,16 +845,20 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
* @buf_len: total number of bytes for the entire buffer
* @period_len: number of bytes for each period
* @direction: transfer direction, to or from device
+ * @context: transfer context (ignored)
*/
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_transfer_direction direction,
+ void *context)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave = chan->private;
+ struct dma_slave_config *sconfig = &atchan->dma_sconfig;
struct at_desc *first = NULL;
struct at_desc *prev = NULL;
unsigned long was_cyclic;
+ unsigned int reg_width;
unsigned int periods = buf_len / period_len;
unsigned int i;
@@ -889,8 +878,13 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
return NULL;
}
+ if (sconfig->direction == DMA_MEM_TO_DEV)
+ reg_width = convert_buswidth(sconfig->dst_addr_width);
+ else
+ reg_width = convert_buswidth(sconfig->src_addr_width);
+
/* Check for too big/unaligned periods and unaligned DMA buffer */
- if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
+ if (atc_dma_cyclic_check_values(reg_width, buf_addr,
period_len, direction))
goto err_out;
@@ -902,8 +896,8 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
if (!desc)
goto err_desc_get;
- if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
- period_len, direction))
+ if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
+ reg_width, period_len, direction))
goto err_desc_get;
atc_desc_chain(&first, &prev, desc);
@@ -926,6 +920,23 @@ err_out:
return NULL;
}
+static int set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *sconfig)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+ /* Check if chan is configured for slave transfers */
+ if (!chan->private)
+ return -EINVAL;
+
+ memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
+
+ convert_burst(&atchan->dma_sconfig.src_maxburst);
+ convert_burst(&atchan->dma_sconfig.dst_maxburst);
+
+ return 0;
+}
+
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
@@ -986,6 +997,8 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
clear_bit(ATC_IS_CYCLIC, &atchan->status);
spin_unlock_irqrestore(&atchan->lock, flags);
+ } else if (cmd == DMA_SLAVE_CONFIG) {
+ return set_runtime_config(chan, (struct dma_slave_config *)arg);
} else {
return -ENXIO;
}
@@ -1016,26 +1029,20 @@ atc_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&atchan->lock, flags);
- last_complete = atchan->completed_cookie;
- last_used = chan->cookie;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS) {
atc_cleanup_descriptors(atchan);
- last_complete = atchan->completed_cookie;
- last_used = chan->cookie;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ ret = dma_cookie_status(chan, cookie, txstate);
}
+ last_complete = chan->completed_cookie;
+ last_used = chan->cookie;
+
spin_unlock_irqrestore(&atchan->lock, flags);
if (ret != DMA_SUCCESS)
- dma_set_tx_state(txstate, last_complete, last_used,
- atc_first_active(atchan)->len);
- else
- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ dma_set_residue(txstate, atc_first_active(atchan)->len);
if (atc_chan_is_paused(atchan))
ret = DMA_PAUSED;
@@ -1129,7 +1136,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated = i;
list_splice(&tmp_list, &atchan->free_list);
- atchan->completed_cookie = chan->cookie = 1;
+ dma_cookie_init(chan);
spin_unlock_irqrestore(&atchan->lock, flags);
/* channel parameters */
@@ -1329,7 +1336,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
struct at_dma_chan *atchan = &atdma->chan[i];
atchan->chan_common.device = &atdma->dma_common;
- atchan->chan_common.cookie = atchan->completed_cookie = 1;
+ dma_cookie_init(&atchan->chan_common);
list_add_tail(&atchan->chan_common.device_node,
&atdma->dma_common.channels);
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index a8d3277d60b5..897a8bcaec90 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -207,8 +207,8 @@ enum atc_status {
* @save_cfg: configuration register that is saved on suspend/resume cycle
* @save_dscr: for cyclic operations, preserve next descriptor address in
* the cyclic list on suspend/resume cycle
+ * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG
* @lock: serializes enqueue/dequeue operations to descriptors lists
- * @completed_cookie: identifier for the most recently completed operation
* @active_list: list of descriptors dmaengine is being running on
* @queue: list of descriptors ready to be submitted to engine
* @free_list: list of descriptors usable by the channel
@@ -223,11 +223,11 @@ struct at_dma_chan {
struct tasklet_struct tasklet;
u32 save_cfg;
u32 save_dscr;
+ struct dma_slave_config dma_sconfig;
spinlock_t lock;
/* these other elements are all protected by lock */
- dma_cookie_t completed_cookie;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
@@ -245,6 +245,36 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
return container_of(dchan, struct at_dma_chan, chan_common);
}
+/*
+ * Fix sconfig's burst size according to at_hdmac. We need to convert them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7.
+ *
+ * This can be done by finding most significant bit set.
+ */
+static inline void convert_burst(u32 *maxburst)
+{
+ if (*maxburst > 1)
+ *maxburst = fls(*maxburst) - 2;
+ else
+ *maxburst = 0;
+}
+
+/*
+ * Fix sconfig's bus width according to at_hdmac.
+ * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2.
+ */
+static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
+{
+ switch (addr_width) {
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return 1;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return 2;
+ default:
+ /* For 1 byte width or fallback */
+ return 0;
+ }
+}
/*-- Controller ------------------------------------------------------*/
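
To make the two conversions concrete: a dma_slave_config requesting src_maxburst = 16 is rewritten by convert_burst() to fls(16) - 2 = 5 - 2 = 3, the encoding at_hdmac expects, while a maxburst of 1 stays at 0. Likewise, a 4-byte address width is mapped by convert_buswidth() to 2, a 2-byte width to 1, and anything else (including 1 byte) falls back to 0.
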
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index d65a718c0f9b..dc89455f5550 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -24,6 +24,7 @@
#include <mach/coh901318.h>
#include "coh901318_lli.h"
+#include "dmaengine.h"
#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
@@ -59,7 +60,6 @@ struct coh901318_base {
struct coh901318_chan {
spinlock_t lock;
int allocated;
- int completed;
int id;
int stopped;
@@ -318,20 +318,6 @@ static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
return 0;
}
-static dma_cookie_t
-coh901318_assign_cookie(struct coh901318_chan *cohc,
- struct coh901318_desc *cohd)
-{
- dma_cookie_t cookie = cohc->chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- cohc->chan.cookie = cookie;
- cohd->desc.cookie = cookie;
-
- return cookie;
-}
static struct coh901318_desc *
coh901318_desc_get(struct coh901318_chan *cohc)
@@ -705,7 +691,7 @@ static void dma_tasklet(unsigned long data)
callback_param = cohd_fin->desc.callback_param;
/* sign this job as completed on the channel */
- cohc->completed = cohd_fin->desc.cookie;
+ dma_cookie_complete(&cohd_fin->desc);
/* release the lli allocation and remove the descriptor */
coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
@@ -929,7 +915,7 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan)
coh901318_config(cohc, NULL);
cohc->allocated = 1;
- cohc->completed = chan->cookie = 1;
+ dma_cookie_init(chan);
spin_unlock_irqrestore(&cohc->lock, flags);
@@ -966,16 +952,16 @@ coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
desc);
struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
unsigned long flags;
+ dma_cookie_t cookie;
spin_lock_irqsave(&cohc->lock, flags);
-
- tx->cookie = coh901318_assign_cookie(cohc, cohd);
+ cookie = dma_cookie_assign(tx);
coh901318_desc_queue(cohc, cohd);
spin_unlock_irqrestore(&cohc->lock, flags);
- return tx->cookie;
+ return cookie;
}
static struct dma_async_tx_descriptor *
@@ -1035,7 +1021,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags)
+ unsigned long flags, void *context)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
struct coh901318_lli *lli;
@@ -1165,17 +1151,12 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
- int ret;
-
- last_complete = cohc->completed;
- last_used = chan->cookie;
+ enum dma_status ret;
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ ret = dma_cookie_status(chan, cookie, txstate);
+ /* FIXME: should be conditional on ret != DMA_SUCCESS? */
+ dma_set_residue(txstate, coh901318_get_bytes_left(chan));
- dma_set_tx_state(txstate, last_complete, last_used,
- coh901318_get_bytes_left(chan));
if (ret == DMA_IN_PROGRESS && cohc->stopped)
ret = DMA_PAUSED;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a6c6051ec858..767bcc31b365 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -510,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
dma_chan_name(chan));
list_del_rcu(&device->global_node);
} else if (err)
- pr_debug("dmaengine: failed to get %s: (%d)\n",
- dma_chan_name(chan), err);
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
else
break;
if (--device->privatecnt == 0)
@@ -564,8 +564,8 @@ void dmaengine_get(void)
list_del_rcu(&device->global_node);
break;
} else if (err)
- pr_err("dmaengine: failed to get %s: (%d)\n",
- dma_chan_name(chan), err);
+ pr_err("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
}
}
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
new file mode 100644
index 000000000000..17f983a4e9ba
--- /dev/null
+++ b/drivers/dma/dmaengine.h
@@ -0,0 +1,89 @@
+/*
+ * The contents of this file are private to DMA engine drivers, and are not
+ * part of the API to be used by DMA engine users.
+ */
+#ifndef DMAENGINE_H
+#define DMAENGINE_H
+
+#include <linux/bug.h>
+#include <linux/dmaengine.h>
+
+/**
+ * dma_cookie_init - initialize the cookies for a DMA channel
+ * @chan: dma channel to initialize
+ */
+static inline void dma_cookie_init(struct dma_chan *chan)
+{
+ chan->cookie = DMA_MIN_COOKIE;
+ chan->completed_cookie = DMA_MIN_COOKIE;
+}
+
+/**
+ * dma_cookie_assign - assign a DMA engine cookie to the descriptor
+ * @tx: descriptor needing cookie
+ *
+ * Assign a unique non-zero per-channel cookie to the descriptor.
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *chan = tx->chan;
+ dma_cookie_t cookie;
+
+ cookie = chan->cookie + 1;
+ if (cookie < DMA_MIN_COOKIE)
+ cookie = DMA_MIN_COOKIE;
+ tx->cookie = chan->cookie = cookie;
+
+ return cookie;
+}
+
+/**
+ * dma_cookie_complete - complete a descriptor
+ * @tx: descriptor to complete
+ *
+ * Mark this descriptor complete by updating the channel's completed
+ * cookie marker. Zero the descriptor's cookie to prevent accidental
+ * repeated completions.
+ *
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
+{
+ BUG_ON(tx->cookie < DMA_MIN_COOKIE);
+ tx->chan->completed_cookie = tx->cookie;
+ tx->cookie = 0;
+}
+
+/**
+ * dma_cookie_status - report cookie status
+ * @chan: dma channel
+ * @cookie: cookie we are interested in
+ * @state: dma_tx_state structure to return last/used cookies
+ *
+ * Report the status of the cookie, filling in the state structure if
+ * non-NULL. No locking is required.
+ */
+static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ dma_cookie_t used, complete;
+
+ used = chan->cookie;
+ complete = chan->completed_cookie;
+ barrier();
+ if (state) {
+ state->last = complete;
+ state->used = used;
+ state->residue = 0;
+ }
+ return dma_async_is_complete(cookie, complete, used);
+}
+
+static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
+{
+ if (state)
+ state->residue = residue;
+}
+
+#endif
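The kernel-doc above spells out the intended usage of these helpers: dma_cookie_assign() and dma_cookie_complete() are called under the channel lock in the submit and completion paths, while dma_cookie_status() is lockless and is meant for device_tx_status(). A minimal sketch of that usage, assuming a hypothetical driver with foo_chan/foo_desc wrapper types (illustrative only, not part of this patch):

#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include "dmaengine.h"

/* hypothetical driver types, for illustration only */
struct foo_desc {
	struct dma_async_tx_descriptor txd;
	struct list_head node;
};

struct foo_chan {
	struct dma_chan chan;
	spinlock_t lock;
	struct list_head queue;
};

static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct foo_chan *fc = container_of(tx->chan, struct foo_chan, chan);
	struct foo_desc *fd = container_of(tx, struct foo_desc, txd);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&fc->lock, flags);	/* dma_cookie_assign() needs the channel lock */
	cookie = dma_cookie_assign(tx);
	list_add_tail(&fd->node, &fc->queue);
	spin_unlock_irqrestore(&fc->lock, flags);

	return cookie;
}

/* completion path, called with fc->lock held */
static void foo_complete(struct foo_desc *fd)
{
	dma_cookie_complete(&fd->txd);		/* records completed_cookie, zeroes txd.cookie */
}

static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	/* lockless: fills state->last/used/residue and classifies @cookie */
	return dma_cookie_status(chan, cookie, state);
}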
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 9b592b02b5f4..7439079f5eed 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -9,6 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
@@ -22,6 +23,7 @@
#include <linux/slab.h>
#include "dw_dmac_regs.h"
+#include "dmaengine.h"
/*
* This supports the Synopsys "DesignWare AHB Central DMA Controller",
@@ -33,19 +35,23 @@
* which does not support descriptor writeback.
*/
-#define DWC_DEFAULT_CTLLO(private) ({ \
- struct dw_dma_slave *__slave = (private); \
- int dms = __slave ? __slave->dst_master : 0; \
- int sms = __slave ? __slave->src_master : 1; \
- u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
- u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
+#define DWC_DEFAULT_CTLLO(_chan) ({ \
+ struct dw_dma_slave *__slave = (_chan->private); \
+ struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
+ struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
+ int _dms = __slave ? __slave->dst_master : 0; \
+ int _sms = __slave ? __slave->src_master : 1; \
+ u8 _smsize = __slave ? _sconfig->src_maxburst : \
+ DW_DMA_MSIZE_16; \
+ u8 _dmsize = __slave ? _sconfig->dst_maxburst : \
+ DW_DMA_MSIZE_16; \
\
- (DWC_CTLL_DST_MSIZE(dmsize) \
- | DWC_CTLL_SRC_MSIZE(smsize) \
+ (DWC_CTLL_DST_MSIZE(_dmsize) \
+ | DWC_CTLL_SRC_MSIZE(_smsize) \
| DWC_CTLL_LLP_D_EN \
| DWC_CTLL_LLP_S_EN \
- | DWC_CTLL_DMS(dms) \
- | DWC_CTLL_SMS(sms)); \
+ | DWC_CTLL_DMS(_dms) \
+ | DWC_CTLL_SMS(_sms)); \
})
/*
@@ -151,21 +157,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
}
}
-/* Called with dwc->lock held and bh disabled */
-static dma_cookie_t
-dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
-{
- dma_cookie_t cookie = dwc->chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- dwc->chan.cookie = cookie;
- desc->txd.cookie = cookie;
-
- return cookie;
-}
-
static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
@@ -192,7 +183,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
/* Enable interrupts */
channel_set_bit(dw, MASK.XFER, dwc->mask);
- channel_set_bit(dw, MASK.BLOCK, dwc->mask);
channel_set_bit(dw, MASK.ERROR, dwc->mask);
dwc->initialized = true;
@@ -245,7 +235,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
spin_lock_irqsave(&dwc->lock, flags);
- dwc->completed = txd->cookie;
+ dma_cookie_complete(txd);
if (callback_required) {
callback = txd->callback;
param = txd->callback_param;
@@ -329,12 +319,6 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
- /*
- * Clear block interrupt flag before scanning so that we don't
- * miss any, and read LLP before RAW_XFER to ensure it is
- * valid if we decide to scan the list.
- */
- dma_writel(dw, CLEAR.BLOCK, dwc->mask);
llp = channel_readl(dwc, LLP);
status_xfer = dma_readl(dw, RAW.XFER);
@@ -470,17 +454,16 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
- u32 status_block, u32 status_err, u32 status_xfer)
+ u32 status_err, u32 status_xfer)
{
unsigned long flags;
- if (status_block & dwc->mask) {
+ if (dwc->mask) {
void (*callback)(void *param);
void *callback_param;
dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
channel_readl(dwc, LLP));
- dma_writel(dw, CLEAR.BLOCK, dwc->mask);
callback = dwc->cdesc->period_callback;
callback_param = dwc->cdesc->period_callback_param;
@@ -520,7 +503,6 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
channel_writel(dwc, CTL_LO, 0);
channel_writel(dwc, CTL_HI, 0);
- dma_writel(dw, CLEAR.BLOCK, dwc->mask);
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
@@ -537,36 +519,29 @@ static void dw_dma_tasklet(unsigned long data)
{
struct dw_dma *dw = (struct dw_dma *)data;
struct dw_dma_chan *dwc;
- u32 status_block;
u32 status_xfer;
u32 status_err;
int i;
- status_block = dma_readl(dw, RAW.BLOCK);
status_xfer = dma_readl(dw, RAW.XFER);
status_err = dma_readl(dw, RAW.ERROR);
- dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
- status_block, status_err);
+ dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
- dwc_handle_cyclic(dw, dwc, status_block, status_err,
- status_xfer);
+ dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
- else if ((status_block | status_xfer) & (1 << i))
+ else if (status_xfer & (1 << i))
dwc_scan_descriptors(dw, dwc);
}
/*
- * Re-enable interrupts. Block Complete interrupts are only
- * enabled if the INT_EN bit in the descriptor is set. This
- * will trigger a scan before the whole list is done.
+ * Re-enable interrupts.
*/
channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
- channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
@@ -583,7 +558,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
* softirq handler.
*/
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
- channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
status = dma_readl(dw, STATUS_INT);
@@ -594,7 +568,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
/* Try to recover */
channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
- channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
@@ -615,7 +588,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
- cookie = dwc_assign_cookie(dwc, desc);
+ cookie = dma_cookie_assign(tx);
/*
* REVISIT: We should attempt to chain as many descriptors as
@@ -674,7 +647,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
else
src_width = dst_width = 0;
- ctllo = DWC_DEFAULT_CTLLO(chan->private)
+ ctllo = DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_DST_WIDTH(dst_width)
| DWC_CTLL_SRC_WIDTH(src_width)
| DWC_CTLL_DST_INC
@@ -731,10 +704,11 @@ err_desc_get:
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags)
+ unsigned long flags, void *context)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma_slave *dws = chan->private;
+ struct dma_slave_config *sconfig = &dwc->dma_sconfig;
struct dw_desc *prev;
struct dw_desc *first;
u32 ctllo;
@@ -750,25 +724,34 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (unlikely(!dws || !sg_len))
return NULL;
- reg_width = dws->reg_width;
prev = first = NULL;
switch (direction) {
case DMA_MEM_TO_DEV:
- ctllo = (DWC_DEFAULT_CTLLO(chan->private)
+ reg_width = __fls(sconfig->dst_addr_width);
+ reg = sconfig->dst_addr;
+ ctllo = (DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
- | DWC_CTLL_SRC_INC
- | DWC_CTLL_FC(dws->fc));
- reg = dws->tx_reg;
+ | DWC_CTLL_SRC_INC);
+
+ ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+ DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
u32 len, dlen, mem;
mem = sg_phys(sg);
len = sg_dma_len(sg);
- mem_width = 2;
- if (unlikely(mem & 3 || len & 3))
+
+ if (!((mem | len) & 7))
+ mem_width = 3;
+ else if (!((mem | len) & 3))
+ mem_width = 2;
+ else if (!((mem | len) & 1))
+ mem_width = 1;
+ else
mem_width = 0;
slave_sg_todev_fill_desc:
@@ -812,21 +795,30 @@ slave_sg_todev_fill_desc:
}
break;
case DMA_DEV_TO_MEM:
- ctllo = (DWC_DEFAULT_CTLLO(chan->private)
+ reg_width = __fls(sconfig->src_addr_width);
+ reg = sconfig->src_addr;
+ ctllo = (DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_INC
- | DWC_CTLL_SRC_FIX
- | DWC_CTLL_FC(dws->fc));
+ | DWC_CTLL_SRC_FIX);
+
+ ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+ DWC_CTLL_FC(DW_DMA_FC_D_P2M);
- reg = dws->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
u32 len, dlen, mem;
mem = sg_phys(sg);
len = sg_dma_len(sg);
- mem_width = 2;
- if (unlikely(mem & 3 || len & 3))
+
+ if (!((mem | len) & 7))
+ mem_width = 3;
+ else if (!((mem | len) & 3))
+ mem_width = 2;
+ else if (!((mem | len) & 1))
+ mem_width = 1;
+ else
mem_width = 0;
slave_sg_fromdev_fill_desc:
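Both scatterlist loops above replace the old fixed 32-bit memory width with an alignment check that picks the widest access the buffer allows: 8-, 4-, 2- or 1-byte accesses, encoded as 3, 2, 1 and 0 respectively, based on the low bits of the address and the length. A standalone sketch of that selection (pick_mem_width() and the test harness are illustrative only, not driver code):

#include <stdint.h>
#include <assert.h>

/* width encoding as used by DWC_CTLL_*_WIDTH(): 0 = byte, 1 = halfword, 2 = word, 3 = doubleword */
static unsigned int pick_mem_width(uint32_t addr, uint32_t len)
{
	if (!((addr | len) & 7))
		return 3;	/* 64-bit accesses possible */
	else if (!((addr | len) & 3))
		return 2;	/* 32-bit */
	else if (!((addr | len) & 1))
		return 1;	/* 16-bit */
	return 0;		/* fall back to byte accesses */
}

int main(void)
{
	assert(pick_mem_width(0x1000, 64) == 3);	/* 8-byte aligned address and length */
	assert(pick_mem_width(0x1004, 64) == 2);	/* only 4-byte aligned */
	assert(pick_mem_width(0x1002, 6) == 1);		/* only 2-byte aligned */
	assert(pick_mem_width(0x1001, 5) == 0);		/* unaligned: byte accesses */
	return 0;
}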
@@ -890,6 +882,39 @@ err_desc_get:
return NULL;
}
+/*
+ * Fix sconfig's burst size according to dw_dmac. We need to convert the
+ * values as follows: 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * NOTE: burst size 2 is not supported by the controller.
+ *
+ * This can be done by finding the most significant bit set: fls(n) - 2.
+ */
+static inline void convert_burst(u32 *maxburst)
+{
+ if (*maxburst > 1)
+ *maxburst = fls(*maxburst) - 2;
+ else
+ *maxburst = 0;
+}
+
+static int
+set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+
+ /* Check if chan is configured for slave transfers */
+ if (!chan->private)
+ return -EINVAL;
+
+ memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+
+ convert_burst(&dwc->dma_sconfig.src_maxburst);
+ convert_burst(&dwc->dma_sconfig.dst_maxburst);
+
+ return 0;
+}
+
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
@@ -939,8 +964,11 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
dwc_descriptor_complete(dwc, desc, false);
- } else
+ } else if (cmd == DMA_SLAVE_CONFIG) {
+ return set_runtime_config(chan, (struct dma_slave_config *)arg);
+ } else {
return -ENXIO;
+ }
return 0;
}
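convert_burst(), added above, encodes the dma_slave_config maxburst values (given in transfer units) into the controller's SRC/DST_MSIZE field: 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, via fls(n) - 2. A standalone sketch of the same mapping, assuming power-of-two bursts as the driver does (burst_to_msize() and the harness are illustrative, not driver code):

#include <stdio.h>

/* same mapping as convert_burst(): msize = fls(burst) - 2 for burst > 1, else 0 */
static unsigned int burst_to_msize(unsigned int burst)
{
	unsigned int msb = 0;

	if (burst <= 1)
		return 0;
	while (burst >>= 1)		/* open-coded fls(): count of shifts = fls(burst) - 1 */
		msb++;
	return msb - 1;			/* equals fls(burst) - 2 */
}

int main(void)
{
	unsigned int bursts[] = { 1, 4, 8, 16 };

	for (unsigned int i = 0; i < 4; i++)
		printf("maxburst %2u -> MSIZE %u\n", bursts[i], burst_to_msize(bursts[i]));
	return 0;			/* prints 0, 1, 2, 3 respectively */
}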
@@ -951,28 +979,17 @@ dwc_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
- int ret;
-
- last_complete = dwc->completed;
- last_used = chan->cookie;
+ enum dma_status ret;
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS) {
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- last_complete = dwc->completed;
- last_used = chan->cookie;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ ret = dma_cookie_status(chan, cookie, txstate);
}
if (ret != DMA_SUCCESS)
- dma_set_tx_state(txstate, last_complete, last_used,
- dwc_first_active(dwc)->len);
- else
- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ dma_set_residue(txstate, dwc_first_active(dwc)->len);
if (dwc->paused)
return DMA_PAUSED;
@@ -1004,7 +1021,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
return -EIO;
}
- dwc->completed = chan->cookie = 1;
+ dma_cookie_init(chan);
/*
* NOTE: some controllers may have additional features that we
@@ -1068,7 +1085,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
- channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
channel_clear_bit(dw, MASK.ERROR, dwc->mask);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1120,7 +1136,6 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
return -EBUSY;
}
- dma_writel(dw, CLEAR.BLOCK, dwc->mask);
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
@@ -1175,11 +1190,11 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
enum dma_transfer_direction direction)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dma_slave_config *sconfig = &dwc->dma_sconfig;
struct dw_cyclic_desc *cdesc;
struct dw_cyclic_desc *retval = NULL;
struct dw_desc *desc;
struct dw_desc *last = NULL;
- struct dw_dma_slave *dws = chan->private;
unsigned long was_cyclic;
unsigned int reg_width;
unsigned int periods;
@@ -1203,7 +1218,12 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
}
retval = ERR_PTR(-EINVAL);
- reg_width = dws->reg_width;
+
+ if (direction == DMA_MEM_TO_DEV)
+ reg_width = __ffs(sconfig->dst_addr_width);
+ else
+ reg_width = __ffs(sconfig->src_addr_width);
+
periods = buf_len / period_len;
/* Check for too big/unaligned periods and unaligned DMA buffer. */
@@ -1236,26 +1256,34 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
switch (direction) {
case DMA_MEM_TO_DEV:
- desc->lli.dar = dws->tx_reg;
+ desc->lli.dar = sconfig->dst_addr;
desc->lli.sar = buf_addr + (period_len * i);
- desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
| DWC_CTLL_SRC_INC
- | DWC_CTLL_FC(dws->fc)
| DWC_CTLL_INT_EN);
+
+ desc->lli.ctllo |= sconfig->device_fc ?
+ DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+ DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+
break;
case DMA_DEV_TO_MEM:
desc->lli.dar = buf_addr + (period_len * i);
- desc->lli.sar = dws->rx_reg;
- desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
+ desc->lli.sar = sconfig->src_addr;
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_INC
| DWC_CTLL_SRC_FIX
- | DWC_CTLL_FC(dws->fc)
| DWC_CTLL_INT_EN);
+
+ desc->lli.ctllo |= sconfig->device_fc ?
+ DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+ DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+
break;
default:
break;
@@ -1322,7 +1350,6 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
while (dma_readl(dw, CH_EN) & dwc->mask)
cpu_relax();
- dma_writel(dw, CLEAR.BLOCK, dwc->mask);
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
@@ -1347,7 +1374,6 @@ static void dw_dma_off(struct dw_dma *dw)
dma_writel(dw, CFG, 0);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
- channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1369,7 +1395,7 @@ static int __init dw_probe(struct platform_device *pdev)
int err;
int i;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
return -EINVAL;
@@ -1423,7 +1449,7 @@ static int __init dw_probe(struct platform_device *pdev)
struct dw_dma_chan *dwc = &dw->chan[i];
dwc->chan.device = &dw->dma;
- dwc->chan.cookie = dwc->completed = 1;
+ dma_cookie_init(&dwc->chan);
if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
list_add_tail(&dwc->chan.device_node,
&dw->dma.channels);
@@ -1432,7 +1458,7 @@ static int __init dw_probe(struct platform_device *pdev)
/* 7 is highest priority & 0 is lowest. */
if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
- dwc->priority = 7 - i;
+ dwc->priority = pdata->nr_channels - i - 1;
else
dwc->priority = i;
@@ -1449,13 +1475,11 @@ static int __init dw_probe(struct platform_device *pdev)
/* Clear/disable all interrupts on all channels. */
dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
- dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
- channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1562,6 +1586,10 @@ static int dw_resume_noirq(struct device *dev)
static const struct dev_pm_ops dw_dev_pm_ops = {
.suspend_noirq = dw_suspend_noirq,
.resume_noirq = dw_resume_noirq,
+ .freeze_noirq = dw_suspend_noirq,
+ .thaw_noirq = dw_resume_noirq,
+ .restore_noirq = dw_resume_noirq,
+ .poweroff_noirq = dw_suspend_noirq,
};
static struct platform_driver dw_driver = {
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 5eef6946a367..f298f69ecbf9 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -13,6 +13,18 @@
#define DW_DMA_MAX_NR_CHANNELS 8
+/* flow controller */
+enum dw_dma_fc {
+ DW_DMA_FC_D_M2M,
+ DW_DMA_FC_D_M2P,
+ DW_DMA_FC_D_P2M,
+ DW_DMA_FC_D_P2P,
+ DW_DMA_FC_P_P2M,
+ DW_DMA_FC_SP_P2P,
+ DW_DMA_FC_P_M2P,
+ DW_DMA_FC_DP_P2P,
+};
+
/*
* Redefine this macro to handle differences between 32- and 64-bit
* addressing, big vs. little endian, etc.
@@ -146,13 +158,15 @@ struct dw_dma_chan {
/* these other elements are all protected by lock */
unsigned long flags;
- dma_cookie_t completed;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
struct dw_cyclic_desc *cdesc;
unsigned int descs_allocated;
+
+ /* configuration passed via DMA_SLAVE_CONFIG */
+ struct dma_slave_config dma_sconfig;
};
static inline struct dw_dma_chan_regs __iomem *
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 59e7a965772b..e6f133b78dc2 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -28,6 +28,8 @@
#include <mach/dma.h>
+#include "dmaengine.h"
+
/* M2P registers */
#define M2P_CONTROL 0x0000
#define M2P_CONTROL_STALLINT BIT(0)
@@ -122,7 +124,6 @@ struct ep93xx_dma_desc {
* @lock: lock protecting the fields following
* @flags: flags for the channel
* @buffer: which buffer to use next (0/1)
- * @last_completed: last completed cookie value
* @active: flattened chain of descriptors currently being processed
* @queue: pending descriptors which are handled next
* @free_list: list of free descriptors which can be used
@@ -157,7 +158,6 @@ struct ep93xx_dma_chan {
#define EP93XX_DMA_IS_CYCLIC 0
int buffer;
- dma_cookie_t last_completed;
struct list_head active;
struct list_head queue;
struct list_head free_list;
@@ -703,7 +703,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
desc = ep93xx_dma_get_active(edmac);
if (desc) {
if (desc->complete) {
- edmac->last_completed = desc->txd.cookie;
+ dma_cookie_complete(&desc->txd);
list_splice_init(&edmac->active, &list);
}
callback = desc->txd.callback;
@@ -783,17 +783,10 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
unsigned long flags;
spin_lock_irqsave(&edmac->lock, flags);
-
- cookie = edmac->chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
+ cookie = dma_cookie_assign(tx);
desc = container_of(tx, struct ep93xx_dma_desc, txd);
- edmac->chan.cookie = cookie;
- desc->txd.cookie = cookie;
-
/*
* If nothing is currently processed, we push this descriptor
* directly to the hardware. Otherwise we put the descriptor
@@ -861,8 +854,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
goto fail_clk_disable;
spin_lock_irq(&edmac->lock);
- edmac->last_completed = 1;
- edmac->chan.cookie = 1;
+ dma_cookie_init(&edmac->chan);
ret = edmac->edma->hw_setup(edmac);
spin_unlock_irq(&edmac->lock);
@@ -983,13 +975,14 @@ fail:
* @sg_len: number of entries in @sgl
* @dir: direction of the DMA transfer
* @flags: flags for the descriptor
+ * @context: operation context (ignored)
*
* Returns a valid DMA descriptor or %NULL in case of failure.
*/
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction dir,
- unsigned long flags)
+ unsigned long flags, void *context)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first;
@@ -1056,6 +1049,7 @@ fail:
* @buf_len: length of the buffer (in bytes)
* @period_len: length of a single period
* @dir: direction of the operation
+ * @context: operation context (ignored)
*
* Prepares a descriptor for cyclic DMA operation. This means that once the
* descriptor is submitted, we will be submitting in a @period_len sized
@@ -1068,7 +1062,7 @@ fail:
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction dir)
+ enum dma_transfer_direction dir, void *context)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first;
@@ -1248,18 +1242,13 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
struct dma_tx_state *state)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
- dma_cookie_t last_used, last_completed;
enum dma_status ret;
unsigned long flags;
spin_lock_irqsave(&edmac->lock, flags);
- last_used = chan->cookie;
- last_completed = edmac->last_completed;
+ ret = dma_cookie_status(chan, cookie, state);
spin_unlock_irqrestore(&edmac->lock, flags);
- ret = dma_async_is_complete(cookie, last_completed, last_used);
- dma_set_tx_state(state, last_completed, last_used, 0);
-
return ret;
}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index b98070c33ca9..8f84761f98ba 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -35,6 +35,7 @@
#include <linux/dmapool.h>
#include <linux/of_platform.h>
+#include "dmaengine.h"
#include "fsldma.h"
#define chan_dbg(chan, fmt, arg...) \
@@ -413,17 +414,10 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
* assign cookies to all of the software descriptors
* that make up this transaction
*/
- cookie = chan->common.cookie;
list_for_each_entry(child, &desc->tx_list, node) {
- cookie++;
- if (cookie < DMA_MIN_COOKIE)
- cookie = DMA_MIN_COOKIE;
-
- child->async_tx.cookie = cookie;
+ cookie = dma_cookie_assign(&child->async_tx);
}
- chan->common.cookie = cookie;
-
/* put this transaction onto the tail of the pending queue */
append_ld_queue(chan, desc);
@@ -765,6 +759,7 @@ fail:
* @sg_len: number of entries in @scatterlist
* @direction: DMA direction
* @flags: DMAEngine flags
+ * @context: transaction context (ignored)
*
* Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
* DMA_SLAVE API, this gets the device-specific information from the
@@ -772,7 +767,8 @@ fail:
*/
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
{
/*
* This operation is not supported on the Freescale DMA controller
@@ -984,19 +980,14 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
struct dma_tx_state *txstate)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- dma_cookie_t last_complete;
- dma_cookie_t last_used;
+ enum dma_status ret;
unsigned long flags;
spin_lock_irqsave(&chan->desc_lock, flags);
-
- last_complete = chan->completed_cookie;
- last_used = dchan->cookie;
-
+ ret = dma_cookie_status(dchan, cookie, txstate);
spin_unlock_irqrestore(&chan->desc_lock, flags);
- dma_set_tx_state(txstate, last_complete, last_used, 0);
- return dma_async_is_complete(cookie, last_complete, last_used);
+ return ret;
}
/*----------------------------------------------------------------------------*/
@@ -1087,8 +1078,8 @@ static void dma_do_tasklet(unsigned long data)
desc = to_fsl_desc(chan->ld_running.prev);
cookie = desc->async_tx.cookie;
+ dma_cookie_complete(&desc->async_tx);
- chan->completed_cookie = cookie;
chan_dbg(chan, "completed_cookie=%d\n", cookie);
}
@@ -1303,6 +1294,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
chan->idle = true;
chan->common.device = &fdev->common;
+ dma_cookie_init(&chan->common);
/* find the IRQ line, if it exists in the device tree */
chan->irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 9cb5aa57c677..f5c38791fc74 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -137,7 +137,6 @@ struct fsldma_device {
struct fsldma_chan {
char name[8]; /* Channel name */
struct fsldma_chan_regs __iomem *regs;
- dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */
struct list_head ld_pending; /* Link descriptors queue */
struct list_head ld_running; /* Link descriptors queue */
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 38586ba8da91..a45b5d2a5987 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -5,6 +5,7 @@
* found on i.MX1/21/27
*
* Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
@@ -22,37 +23,159 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
+#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <asm/irq.h>
-#include <mach/dma-v1.h>
+#include <mach/dma.h>
#include <mach/hardware.h>
+#include "dmaengine.h"
+#define IMXDMA_MAX_CHAN_DESCRIPTORS 16
+#define IMX_DMA_CHANNELS 16
+
+#define IMX_DMA_2D_SLOTS 2
+#define IMX_DMA_2D_SLOT_A 0
+#define IMX_DMA_2D_SLOT_B 1
+
+#define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)
+#define IMX_DMA_MEMSIZE_32 (0 << 4)
+#define IMX_DMA_MEMSIZE_8 (1 << 4)
+#define IMX_DMA_MEMSIZE_16 (2 << 4)
+#define IMX_DMA_TYPE_LINEAR (0 << 10)
+#define IMX_DMA_TYPE_2D (1 << 10)
+#define IMX_DMA_TYPE_FIFO (2 << 10)
+
+#define IMX_DMA_ERR_BURST (1 << 0)
+#define IMX_DMA_ERR_REQUEST (1 << 1)
+#define IMX_DMA_ERR_TRANSFER (1 << 2)
+#define IMX_DMA_ERR_BUFFER (1 << 3)
+#define IMX_DMA_ERR_TIMEOUT (1 << 4)
+
+#define DMA_DCR 0x00 /* Control Register */
+#define DMA_DISR 0x04 /* Interrupt status Register */
+#define DMA_DIMR 0x08 /* Interrupt mask Register */
+#define DMA_DBTOSR 0x0c /* Burst timeout status Register */
+#define DMA_DRTOSR 0x10 /* Request timeout Register */
+#define DMA_DSESR 0x14 /* Transfer Error Status Register */
+#define DMA_DBOSR 0x18 /* Buffer overflow status Register */
+#define DMA_DBTOCR 0x1c /* Burst timeout control Register */
+#define DMA_WSRA 0x40 /* W-Size Register A */
+#define DMA_XSRA 0x44 /* X-Size Register A */
+#define DMA_YSRA 0x48 /* Y-Size Register A */
+#define DMA_WSRB 0x4c /* W-Size Register B */
+#define DMA_XSRB 0x50 /* X-Size Register B */
+#define DMA_YSRB 0x54 /* Y-Size Register B */
+#define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */
+#define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */
+#define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
+#define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */
+#define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
+#define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */
+#define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
+#define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
+#define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */
+
+#define DCR_DRST (1<<1)
+#define DCR_DEN (1<<0)
+#define DBTOCR_EN (1<<15)
+#define DBTOCR_CNT(x) ((x) & 0x7fff)
+#define CNTR_CNT(x) ((x) & 0xffffff)
+#define CCR_ACRPT (1<<14)
+#define CCR_DMOD_LINEAR (0x0 << 12)
+#define CCR_DMOD_2D (0x1 << 12)
+#define CCR_DMOD_FIFO (0x2 << 12)
+#define CCR_DMOD_EOBFIFO (0x3 << 12)
+#define CCR_SMOD_LINEAR (0x0 << 10)
+#define CCR_SMOD_2D (0x1 << 10)
+#define CCR_SMOD_FIFO (0x2 << 10)
+#define CCR_SMOD_EOBFIFO (0x3 << 10)
+#define CCR_MDIR_DEC (1<<9)
+#define CCR_MSEL_B (1<<8)
+#define CCR_DSIZ_32 (0x0 << 6)
+#define CCR_DSIZ_8 (0x1 << 6)
+#define CCR_DSIZ_16 (0x2 << 6)
+#define CCR_SSIZ_32 (0x0 << 4)
+#define CCR_SSIZ_8 (0x1 << 4)
+#define CCR_SSIZ_16 (0x2 << 4)
+#define CCR_REN (1<<3)
+#define CCR_RPT (1<<2)
+#define CCR_FRC (1<<1)
+#define CCR_CEN (1<<0)
+#define RTOR_EN (1<<15)
+#define RTOR_CLK (1<<14)
+#define RTOR_PSC (1<<13)
+
+enum imxdma_prep_type {
+ IMXDMA_DESC_MEMCPY,
+ IMXDMA_DESC_INTERLEAVED,
+ IMXDMA_DESC_SLAVE_SG,
+ IMXDMA_DESC_CYCLIC,
+};
+
+struct imx_dma_2d_config {
+ u16 xsr;
+ u16 ysr;
+ u16 wsr;
+ int count;
+};
+
+struct imxdma_desc {
+ struct list_head node;
+ struct dma_async_tx_descriptor desc;
+ enum dma_status status;
+ dma_addr_t src;
+ dma_addr_t dest;
+ size_t len;
+ enum dma_transfer_direction direction;
+ enum imxdma_prep_type type;
+ /* For memcpy and interleaved */
+ unsigned int config_port;
+ unsigned int config_mem;
+ /* For interleaved transfers */
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ /* For slave sg and cyclic */
+ struct scatterlist *sg;
+ unsigned int sgcount;
+};
+
struct imxdma_channel {
+ int hw_chaining;
+ struct timer_list watchdog;
struct imxdma_engine *imxdma;
unsigned int channel;
- unsigned int imxdma_channel;
+ struct tasklet_struct dma_tasklet;
+ struct list_head ld_free;
+ struct list_head ld_queue;
+ struct list_head ld_active;
+ int descs_allocated;
enum dma_slave_buswidth word_size;
dma_addr_t per_address;
u32 watermark_level;
struct dma_chan chan;
- spinlock_t lock;
struct dma_async_tx_descriptor desc;
- dma_cookie_t last_completed;
enum dma_status status;
int dma_request;
struct scatterlist *sg_list;
+ u32 ccr_from_device;
+ u32 ccr_to_device;
+ bool enabled_2d;
+ int slot_2d;
};
-#define MAX_DMA_CHANNELS 8
-
struct imxdma_engine {
struct device *dev;
struct device_dma_parameters dma_parms;
struct dma_device dma_device;
- struct imxdma_channel channel[MAX_DMA_CHANNELS];
+ void __iomem *base;
+ struct clk *dma_clk;
+ spinlock_t lock;
+ struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
+ struct imxdma_channel channel[IMX_DMA_CHANNELS];
};
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
@@ -60,36 +183,418 @@ static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
return container_of(chan, struct imxdma_channel, chan);
}
-static void imxdma_handle(struct imxdma_channel *imxdmac)
+static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
+{
+ struct imxdma_desc *desc;
+
+ if (!list_empty(&imxdmac->ld_active)) {
+ desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
+ node);
+ if (desc->type == IMXDMA_DESC_CYCLIC)
+ return true;
+ }
+ return false;
+}
+
+
+
+static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
+ unsigned offset)
+{
+ __raw_writel(val, imxdma->base + offset);
+}
+
+static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
+{
+ return __raw_readl(imxdma->base + offset);
+}
+
+static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
+{
+ if (cpu_is_mx27())
+ return imxdmac->hw_chaining;
+ else
+ return 0;
+}
+
+/*
+ * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
+ */
+static inline int imxdma_sg_next(struct imxdma_desc *d)
{
- if (imxdmac->desc.callback)
- imxdmac->desc.callback(imxdmac->desc.callback_param);
- imxdmac->last_completed = imxdmac->desc.cookie;
+ struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ struct scatterlist *sg = d->sg;
+ unsigned long now;
+
+ now = min(d->len, sg->length);
+ if (d->len != IMX_DMA_LENGTH_LOOP)
+ d->len -= now;
+
+ if (d->direction == DMA_DEV_TO_MEM)
+ imx_dmav1_writel(imxdma, sg->dma_address,
+ DMA_DAR(imxdmac->channel));
+ else
+ imx_dmav1_writel(imxdma, sg->dma_address,
+ DMA_SAR(imxdmac->channel));
+
+ imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));
+
+ dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
+ "size 0x%08x\n", __func__, imxdmac->channel,
+ imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
+ imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
+ imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
+
+ return now;
}
-static void imxdma_irq_handler(int channel, void *data)
+static void imxdma_enable_hw(struct imxdma_desc *d)
{
- struct imxdma_channel *imxdmac = data;
+ struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ int channel = imxdmac->channel;
+ unsigned long flags;
+
+ dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
+
+ local_irq_save(flags);
+
+ imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
+ imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
+ ~(1 << channel), DMA_DIMR);
+ imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
+ CCR_CEN | CCR_ACRPT, DMA_CCR(channel));
+
+ if ((cpu_is_mx21() || cpu_is_mx27()) &&
+ d->sg && imxdma_hw_chain(imxdmac)) {
+ d->sg = sg_next(d->sg);
+ if (d->sg) {
+ u32 tmp;
+ imxdma_sg_next(d);
+ tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
+ imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
+ DMA_CCR(channel));
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
+{
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ int channel = imxdmac->channel;
+ unsigned long flags;
+
+ dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
+
+ if (imxdma_hw_chain(imxdmac))
+ del_timer(&imxdmac->watchdog);
+
+ local_irq_save(flags);
+ imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
+ (1 << channel), DMA_DIMR);
+ imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
+ ~CCR_CEN, DMA_CCR(channel));
+ imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
+ local_irq_restore(flags);
+}
+
+static void imxdma_watchdog(unsigned long data)
+{
+ struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ int channel = imxdmac->channel;
+
+ imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));
- imxdmac->status = DMA_SUCCESS;
- imxdma_handle(imxdmac);
+ /* Tasklet watchdog error handler */
+ tasklet_schedule(&imxdmac->dma_tasklet);
+ dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
+ imxdmac->channel);
}
-static void imxdma_err_handler(int channel, void *data, int error)
+static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
- struct imxdma_channel *imxdmac = data;
+ struct imxdma_engine *imxdma = dev_id;
+ unsigned int err_mask;
+ int i, disr;
+ int errcode;
+
+ disr = imx_dmav1_readl(imxdma, DMA_DISR);
+
+ err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
+ imx_dmav1_readl(imxdma, DMA_DRTOSR) |
+ imx_dmav1_readl(imxdma, DMA_DSESR) |
+ imx_dmav1_readl(imxdma, DMA_DBOSR);
+
+ if (!err_mask)
+ return IRQ_HANDLED;
+
+ imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);
+
+ for (i = 0; i < IMX_DMA_CHANNELS; i++) {
+ if (!(err_mask & (1 << i)))
+ continue;
+ errcode = 0;
+
+ if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
+ imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
+ errcode |= IMX_DMA_ERR_BURST;
+ }
+ if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
+ imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
+ errcode |= IMX_DMA_ERR_REQUEST;
+ }
+ if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
+ imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
+ errcode |= IMX_DMA_ERR_TRANSFER;
+ }
+ if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
+ imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
+ errcode |= IMX_DMA_ERR_BUFFER;
+ }
+ /* Tasklet error handler */
+ tasklet_schedule(&imxdma->channel[i].dma_tasklet);
+
+ printk(KERN_WARNING
+ "DMA timeout on channel %d -%s%s%s%s\n", i,
+ errcode & IMX_DMA_ERR_BURST ? " burst" : "",
+ errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
+ errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
+ errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
+ }
+ return IRQ_HANDLED;
+}
+
+static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
+{
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ int chno = imxdmac->channel;
+ struct imxdma_desc *desc;
+
+ spin_lock(&imxdma->lock);
+ if (list_empty(&imxdmac->ld_active)) {
+ spin_unlock(&imxdma->lock);
+ goto out;
+ }
+
+ desc = list_first_entry(&imxdmac->ld_active,
+ struct imxdma_desc,
+ node);
+ spin_unlock(&imxdma->lock);
+
+ if (desc->sg) {
+ u32 tmp;
+ desc->sg = sg_next(desc->sg);
+
+ if (desc->sg) {
+ imxdma_sg_next(desc);
+
+ tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));
+
+ if (imxdma_hw_chain(imxdmac)) {
+ /* FIXME: The timeout should probably be
+ * configurable
+ */
+ mod_timer(&imxdmac->watchdog,
+ jiffies + msecs_to_jiffies(500));
+
+ tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
+ imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
+ } else {
+ imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
+ DMA_CCR(chno));
+ tmp |= CCR_CEN;
+ }
+
+ imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
+
+ if (imxdma_chan_is_doing_cyclic(imxdmac))
+ /* Tasklet progression */
+ tasklet_schedule(&imxdmac->dma_tasklet);
+
+ return;
+ }
+
+ if (imxdma_hw_chain(imxdmac)) {
+ del_timer(&imxdmac->watchdog);
+ return;
+ }
+ }
+
+out:
+ imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
+ /* Tasklet irq */
+ tasklet_schedule(&imxdmac->dma_tasklet);
+}
+
+static irqreturn_t dma_irq_handler(int irq, void *dev_id)
+{
+ struct imxdma_engine *imxdma = dev_id;
+ int i, disr;
+
+ if (cpu_is_mx21() || cpu_is_mx27())
+ imxdma_err_handler(irq, dev_id);
+
+ disr = imx_dmav1_readl(imxdma, DMA_DISR);
+
+ dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);
+
+ imx_dmav1_writel(imxdma, disr, DMA_DISR);
+ for (i = 0; i < IMX_DMA_CHANNELS; i++) {
+ if (disr & (1 << i))
+ dma_irq_handle_channel(&imxdma->channel[i]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int imxdma_xfer_desc(struct imxdma_desc *d)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ unsigned long flags;
+ int slot = -1;
+ int i;
+
+ /* Configure and enable */
+ switch (d->type) {
+ case IMXDMA_DESC_INTERLEAVED:
+ /* Try to get a free 2D slot */
+ spin_lock_irqsave(&imxdma->lock, flags);
+ for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
+ if ((imxdma->slots_2d[i].count > 0) &&
+ ((imxdma->slots_2d[i].xsr != d->x) ||
+ (imxdma->slots_2d[i].ysr != d->y) ||
+ (imxdma->slots_2d[i].wsr != d->w)))
+ continue;
+ slot = i;
+ break;
+ }
+ if (slot < 0)
+ return -EBUSY;
+
+ imxdma->slots_2d[slot].xsr = d->x;
+ imxdma->slots_2d[slot].ysr = d->y;
+ imxdma->slots_2d[slot].wsr = d->w;
+ imxdma->slots_2d[slot].count++;
+
+ imxdmac->slot_2d = slot;
+ imxdmac->enabled_2d = true;
+ spin_unlock_irqrestore(&imxdma->lock, flags);
+
+ if (slot == IMX_DMA_2D_SLOT_A) {
+ d->config_mem &= ~CCR_MSEL_B;
+ d->config_port &= ~CCR_MSEL_B;
+ imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
+ imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
+ imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
+ } else {
+ d->config_mem |= CCR_MSEL_B;
+ d->config_port |= CCR_MSEL_B;
+ imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
+ imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
+ imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
+ }
+ /*
+ * We fall through here intentionally, since a 2D transfer is
+ * similar to MEMCPY, just adding the 2D slot configuration.
+ */
+ case IMXDMA_DESC_MEMCPY:
+ imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
+ imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
+ imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
+ DMA_CCR(imxdmac->channel));
+
+ imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));
+
+ dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
+ "dma_length=%d\n", __func__, imxdmac->channel,
+ d->dest, d->src, d->len);
+
+ break;
+ /* Cyclic transfer is the same as slave_sg with special sg configuration. */
+ case IMXDMA_DESC_CYCLIC:
+ case IMXDMA_DESC_SLAVE_SG:
+ if (d->direction == DMA_DEV_TO_MEM) {
+ imx_dmav1_writel(imxdma, imxdmac->per_address,
+ DMA_SAR(imxdmac->channel));
+ imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
+ DMA_CCR(imxdmac->channel));
+
+ dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
+ "total length=%d dev_addr=0x%08x (dev2mem)\n",
+ __func__, imxdmac->channel, d->sg, d->sgcount,
+ d->len, imxdmac->per_address);
+ } else if (d->direction == DMA_MEM_TO_DEV) {
+ imx_dmav1_writel(imxdma, imxdmac->per_address,
+ DMA_DAR(imxdmac->channel));
+ imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
+ DMA_CCR(imxdmac->channel));
+
+ dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
+ "total length=%d dev_addr=0x%08x (mem2dev)\n",
+ __func__, imxdmac->channel, d->sg, d->sgcount,
+ d->len, imxdmac->per_address);
+ } else {
+ dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
+ __func__, imxdmac->channel);
+ return -EINVAL;
+ }
+
+ imxdma_sg_next(d);
- imxdmac->status = DMA_ERROR;
- imxdma_handle(imxdmac);
+ break;
+ default:
+ return -EINVAL;
+ }
+ imxdma_enable_hw(d);
+ return 0;
}
-static void imxdma_progression(int channel, void *data,
- struct scatterlist *sg)
+static void imxdma_tasklet(unsigned long data)
{
- struct imxdma_channel *imxdmac = data;
+ struct imxdma_channel *imxdmac = (void *)data;
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ struct imxdma_desc *desc;
- imxdmac->status = DMA_SUCCESS;
- imxdma_handle(imxdmac);
+ spin_lock(&imxdma->lock);
+
+ if (list_empty(&imxdmac->ld_active)) {
+ /* Someone might have called terminate all */
+ goto out;
+ }
+ desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
+
+ if (desc->desc.callback)
+ desc->desc.callback(desc->desc.callback_param);
+
+ dma_cookie_complete(&desc->desc);
+
+ /* If we are dealing with a cyclic descriptor keep it on ld_active */
+ if (imxdma_chan_is_doing_cyclic(imxdmac))
+ goto out;
+
+ /* Free 2D slot if it was an interleaved transfer */
+ if (imxdmac->enabled_2d) {
+ imxdma->slots_2d[imxdmac->slot_2d].count--;
+ imxdmac->enabled_2d = false;
+ }
+
+ list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
+
+ if (!list_empty(&imxdmac->ld_queue)) {
+ desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
+ node);
+ list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
+ if (imxdma_xfer_desc(desc) < 0)
+ dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
+ __func__, imxdmac->channel);
+ }
+out:
+ spin_unlock(&imxdma->lock);
}
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -97,13 +602,18 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct dma_slave_config *dmaengine_cfg = (void *)arg;
- int ret;
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ unsigned long flags;
unsigned int mode = 0;
switch (cmd) {
case DMA_TERMINATE_ALL:
- imxdmac->status = DMA_ERROR;
- imx_dma_disable(imxdmac->imxdma_channel);
+ imxdma_disable_hw(imxdmac);
+
+ spin_lock_irqsave(&imxdma->lock, flags);
+ list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
+ list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
+ spin_unlock_irqrestore(&imxdma->lock, flags);
return 0;
case DMA_SLAVE_CONFIG:
if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
@@ -128,16 +638,22 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
mode = IMX_DMA_MEMSIZE_32;
break;
}
- ret = imx_dma_config_channel(imxdmac->imxdma_channel,
- mode | IMX_DMA_TYPE_FIFO,
- IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
- imxdmac->dma_request, 1);
-
- if (ret)
- return ret;
- imx_dma_config_burstlen(imxdmac->imxdma_channel,
- imxdmac->watermark_level * imxdmac->word_size);
+ imxdmac->hw_chaining = 1;
+ if (!imxdma_hw_chain(imxdmac))
+ return -EINVAL;
+ imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
+ ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
+ CCR_REN;
+ imxdmac->ccr_to_device =
+ (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
+ ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
+ imx_dmav1_writel(imxdma, imxdmac->dma_request,
+ DMA_RSSR(imxdmac->channel));
+
+ /* Set burst length */
+ imx_dmav1_writel(imxdma, imxdmac->watermark_level *
+ imxdmac->word_size, DMA_BLR(imxdmac->channel));
return 0;
default:
@@ -151,43 +667,20 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
- dma_cookie_t last_used;
- enum dma_status ret;
-
- last_used = chan->cookie;
-
- ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
- dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);
-
- return ret;
-}
-
-static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
-{
- dma_cookie_t cookie = imxdma->chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- imxdma->chan.cookie = cookie;
- imxdma->desc.cookie = cookie;
-
- return cookie;
+ return dma_cookie_status(chan, cookie, txstate);
}
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
dma_cookie_t cookie;
+ unsigned long flags;
- spin_lock_irq(&imxdmac->lock);
-
- cookie = imxdma_assign_cookie(imxdmac);
-
- imx_dma_enable(imxdmac->imxdma_channel);
-
- spin_unlock_irq(&imxdmac->lock);
+ spin_lock_irqsave(&imxdma->lock, flags);
+ list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
+ cookie = dma_cookie_assign(tx);
+ spin_unlock_irqrestore(&imxdma->lock, flags);
return cookie;
}
@@ -197,23 +690,52 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imx_dma_data *data = chan->private;
- imxdmac->dma_request = data->dma_request;
+ if (data != NULL)
+ imxdmac->dma_request = data->dma_request;
- dma_async_tx_descriptor_init(&imxdmac->desc, chan);
- imxdmac->desc.tx_submit = imxdma_tx_submit;
- /* txd.flags will be overwritten in prep funcs */
- imxdmac->desc.flags = DMA_CTRL_ACK;
+ while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
+ struct imxdma_desc *desc;
- imxdmac->status = DMA_SUCCESS;
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ break;
+ __memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
+ dma_async_tx_descriptor_init(&desc->desc, chan);
+ desc->desc.tx_submit = imxdma_tx_submit;
+ /* txd.flags will be overwritten in prep funcs */
+ desc->desc.flags = DMA_CTRL_ACK;
+ desc->status = DMA_SUCCESS;
+
+ list_add_tail(&desc->node, &imxdmac->ld_free);
+ imxdmac->descs_allocated++;
+ }
- return 0;
+ if (!imxdmac->descs_allocated)
+ return -ENOMEM;
+
+ return imxdmac->descs_allocated;
}
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ struct imxdma_desc *desc, *_desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imxdma->lock, flags);
+
+ imxdma_disable_hw(imxdmac);
+ list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
+ list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
- imx_dma_disable(imxdmac->imxdma_channel);
+ spin_unlock_irqrestore(&imxdma->lock, flags);
+
+ list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
+ kfree(desc);
+ imxdmac->descs_allocated--;
+ }
+ INIT_LIST_HEAD(&imxdmac->ld_free);
if (imxdmac->sg_list) {
kfree(imxdmac->sg_list);
@@ -224,27 +746,23 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags)
+ unsigned long flags, void *context)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct scatterlist *sg;
- int i, ret, dma_length = 0;
- unsigned int dmamode;
+ int i, dma_length = 0;
+ struct imxdma_desc *desc;
- if (imxdmac->status == DMA_IN_PROGRESS)
+ if (list_empty(&imxdmac->ld_free) ||
+ imxdma_chan_is_doing_cyclic(imxdmac))
return NULL;
- imxdmac->status = DMA_IN_PROGRESS;
+ desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
for_each_sg(sgl, sg, sg_len, i) {
dma_length += sg->length;
}
- if (direction == DMA_DEV_TO_MEM)
- dmamode = DMA_MODE_READ;
- else
- dmamode = DMA_MODE_WRITE;
-
switch (imxdmac->word_size) {
case DMA_SLAVE_BUSWIDTH_4_BYTES:
if (sgl->length & 3 || sgl->dma_address & 3)
@@ -260,37 +778,41 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
return NULL;
}
- ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
- dma_length, imxdmac->per_address, dmamode);
- if (ret)
- return NULL;
+ desc->type = IMXDMA_DESC_SLAVE_SG;
+ desc->sg = sgl;
+ desc->sgcount = sg_len;
+ desc->len = dma_length;
+ desc->direction = direction;
+ if (direction == DMA_DEV_TO_MEM) {
+ desc->src = imxdmac->per_address;
+ } else {
+ desc->dest = imxdmac->per_address;
+ }
+ desc->desc.callback = NULL;
+ desc->desc.callback_param = NULL;
- return &imxdmac->desc;
+ return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_transfer_direction direction,
+ void *context)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
- int i, ret;
+ struct imxdma_desc *desc;
+ int i;
unsigned int periods = buf_len / period_len;
- unsigned int dmamode;
dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
__func__, imxdmac->channel, buf_len, period_len);
- if (imxdmac->status == DMA_IN_PROGRESS)
+ if (list_empty(&imxdmac->ld_free) ||
+ imxdma_chan_is_doing_cyclic(imxdmac))
return NULL;
- imxdmac->status = DMA_IN_PROGRESS;
- ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
- imxdma_progression);
- if (ret) {
- dev_err(imxdma->dev, "Failed to setup the DMA handler\n");
- return NULL;
- }
+ desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
if (imxdmac->sg_list)
kfree(imxdmac->sg_list);
@@ -316,62 +838,221 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
imxdmac->sg_list[periods].page_link =
((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
- if (direction == DMA_DEV_TO_MEM)
- dmamode = DMA_MODE_READ;
- else
- dmamode = DMA_MODE_WRITE;
+ desc->type = IMXDMA_DESC_CYCLIC;
+ desc->sg = imxdmac->sg_list;
+ desc->sgcount = periods;
+ desc->len = IMX_DMA_LENGTH_LOOP;
+ desc->direction = direction;
+ if (direction == DMA_DEV_TO_MEM) {
+ desc->src = imxdmac->per_address;
+ } else {
+ desc->dest = imxdmac->per_address;
+ }
+ desc->desc.callback = NULL;
+ desc->desc.callback_param = NULL;
+
+ return &desc->desc;
+}
+
+static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ struct imxdma_desc *desc;
- ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods,
- IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode);
- if (ret)
+ dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
+ __func__, imxdmac->channel, src, dest, len);
+
+ if (list_empty(&imxdmac->ld_free) ||
+ imxdma_chan_is_doing_cyclic(imxdmac))
return NULL;
- return &imxdmac->desc;
+ desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
+
+ desc->type = IMXDMA_DESC_MEMCPY;
+ desc->src = src;
+ desc->dest = dest;
+ desc->len = len;
+ desc->direction = DMA_MEM_TO_MEM;
+ desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
+ desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
+ desc->desc.callback = NULL;
+ desc->desc.callback_param = NULL;
+
+ return &desc->desc;
+}
+
+static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ struct imxdma_desc *desc;
+
+ dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
+ " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
+ imxdmac->channel, xt->src_start, xt->dst_start,
+ xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
+ xt->numf, xt->frame_size);
+
+ if (list_empty(&imxdmac->ld_free) ||
+ imxdma_chan_is_doing_cyclic(imxdmac))
+ return NULL;
+
+ if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
+ return NULL;
+
+ desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
+
+ desc->type = IMXDMA_DESC_INTERLEAVED;
+ desc->src = xt->src_start;
+ desc->dest = xt->dst_start;
+ desc->x = xt->sgl[0].size;
+ desc->y = xt->numf;
+ desc->w = xt->sgl[0].icg + desc->x;
+ desc->len = desc->x * desc->y;
+ desc->direction = DMA_MEM_TO_MEM;
+ desc->config_port = IMX_DMA_MEMSIZE_32;
+ desc->config_mem = IMX_DMA_MEMSIZE_32;
+ if (xt->src_sgl)
+ desc->config_mem |= IMX_DMA_TYPE_2D;
+ if (xt->dst_sgl)
+ desc->config_port |= IMX_DMA_TYPE_2D;
+ desc->desc.callback = NULL;
+ desc->desc.callback_param = NULL;
+
+ return &desc->desc;
}
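imxdma_prep_dma_interleaved() above maps a single-frame dma_interleaved_template onto the i.MX 2D slot registers: X is the chunk size in bytes (sgl[0].size), Y the number of chunks (numf), W the line stride (icg + chunk size), and the transferred length is X * Y. A small sketch of that geometry calculation under the same single-frame assumption (geom_from_template() is illustrative, not driver code):

#include <stdio.h>

/* geometry of one 2D transfer as programmed into the X/Y/W-Size registers */
struct imx_2d_geom {
	unsigned int x;		/* bytes per row (sgl[0].size) */
	unsigned int y;		/* number of rows (numf) */
	unsigned int w;		/* stride in bytes: icg + x */
	unsigned int len;	/* payload actually copied: x * y */
};

static struct imx_2d_geom geom_from_template(unsigned int size, unsigned int icg,
					     unsigned int numf)
{
	struct imx_2d_geom g = {
		.x = size,
		.y = numf,
		.w = icg + size,
		.len = size * numf,
	};
	return g;
}

int main(void)
{
	/* e.g. 320 bytes per line, 16-byte gap between lines, 240 lines */
	struct imx_2d_geom g = geom_from_template(320, 16, 240);

	printf("x=%u y=%u w=%u len=%u\n", g.x, g.y, g.w, g.len);
	return 0;	/* x=320 y=240 w=336 len=76800 */
}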
static void imxdma_issue_pending(struct dma_chan *chan)
{
- /*
- * Nothing to do. We only have a single descriptor
- */
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ struct imxdma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imxdma->lock, flags);
+ if (list_empty(&imxdmac->ld_active) &&
+ !list_empty(&imxdmac->ld_queue)) {
+ desc = list_first_entry(&imxdmac->ld_queue,
+ struct imxdma_desc, node);
+
+ if (imxdma_xfer_desc(desc) < 0) {
+ dev_warn(imxdma->dev,
+ "%s: channel: %d couldn't issue DMA xfer\n",
+ __func__, imxdmac->channel);
+ } else {
+ list_move_tail(imxdmac->ld_queue.next,
+ &imxdmac->ld_active);
+ }
+ }
+ spin_unlock_irqrestore(&imxdma->lock, flags);
}
static int __init imxdma_probe(struct platform_device *pdev)
-{
+ {
struct imxdma_engine *imxdma;
int ret, i;
+
imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
if (!imxdma)
return -ENOMEM;
+ if (cpu_is_mx1()) {
+ imxdma->base = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
+ } else if (cpu_is_mx21()) {
+ imxdma->base = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
+ } else if (cpu_is_mx27()) {
+ imxdma->base = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
+ } else {
+ kfree(imxdma);
+ return 0;
+ }
+
+ imxdma->dma_clk = clk_get(NULL, "dma");
+ if (IS_ERR(imxdma->dma_clk))
+ return PTR_ERR(imxdma->dma_clk);
+ clk_enable(imxdma->dma_clk);
+
+ /* reset DMA module */
+ imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
+
+ if (cpu_is_mx1()) {
+ ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
+ if (ret) {
+ dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
+ kfree(imxdma);
+ return ret;
+ }
+
+ ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
+ if (ret) {
+ dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
+ free_irq(MX1_DMA_INT, NULL);
+ kfree(imxdma);
+ return ret;
+ }
+ }
+
+ /* enable DMA module */
+ imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);
+
+ /* clear all interrupts */
+ imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
+
+ /* disable interrupts */
+ imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
+
INIT_LIST_HEAD(&imxdma->dma_device.channels);
dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
+ dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);
+
+ /* Initialize 2D global parameters */
+ for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
+ imxdma->slots_2d[i].count = 0;
+
+ spin_lock_init(&imxdma->lock);
/* Initialize channel parameters */
- for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ for (i = 0; i < IMX_DMA_CHANNELS; i++) {
struct imxdma_channel *imxdmac = &imxdma->channel[i];
- imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
- DMA_PRIO_MEDIUM);
- if ((int)imxdmac->channel < 0) {
- ret = -ENODEV;
- goto err_init;
+ if (cpu_is_mx21() || cpu_is_mx27()) {
+ ret = request_irq(MX2x_INT_DMACH0 + i,
+ dma_irq_handler, 0, "DMA", imxdma);
+ if (ret) {
+ dev_warn(imxdma->dev, "Can't register IRQ %d "
+ "for DMA channel %d\n",
+ MX2x_INT_DMACH0 + i, i);
+ goto err_init;
+ }
+ init_timer(&imxdmac->watchdog);
+ imxdmac->watchdog.function = &imxdma_watchdog;
+ imxdmac->watchdog.data = (unsigned long)imxdmac;
}
- imx_dma_setup_handlers(imxdmac->imxdma_channel,
- imxdma_irq_handler, imxdma_err_handler, imxdmac);
-
imxdmac->imxdma = imxdma;
- spin_lock_init(&imxdmac->lock);
+ INIT_LIST_HEAD(&imxdmac->ld_queue);
+ INIT_LIST_HEAD(&imxdmac->ld_free);
+ INIT_LIST_HEAD(&imxdmac->ld_active);
+
+ tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
+ (unsigned long)imxdmac);
imxdmac->chan.device = &imxdma->dma_device;
+ dma_cookie_init(&imxdmac->chan);
imxdmac->channel = i;
/* Add the channel to the DMAC list */
- list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels);
+ list_add_tail(&imxdmac->chan.device_node,
+ &imxdma->dma_device.channels);
}
imxdma->dev = &pdev->dev;
@@ -382,11 +1063,14 @@ static int __init imxdma_probe(struct platform_device *pdev)
imxdma->dma_device.device_tx_status = imxdma_tx_status;
imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
+ imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
+ imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
imxdma->dma_device.device_control = imxdma_control;
imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
platform_set_drvdata(pdev, imxdma);
+ imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
@@ -399,9 +1083,13 @@ static int __init imxdma_probe(struct platform_device *pdev)
return 0;
err_init:
- while (--i >= 0) {
- struct imxdma_channel *imxdmac = &imxdma->channel[i];
- imx_dma_free(imxdmac->imxdma_channel);
+
+ if (cpu_is_mx21() || cpu_is_mx27()) {
+ while (--i >= 0)
+ free_irq(MX2x_INT_DMACH0 + i, NULL);
+ } else if (cpu_is_mx1()) {
+ free_irq(MX1_DMA_INT, NULL);
+ free_irq(MX1_DMA_ERR, NULL);
}
kfree(imxdma);
@@ -415,10 +1103,12 @@ static int __exit imxdma_remove(struct platform_device *pdev)
dma_async_device_unregister(&imxdma->dma_device);
- for (i = 0; i < MAX_DMA_CHANNELS; i++) {
- struct imxdma_channel *imxdmac = &imxdma->channel[i];
-
- imx_dma_free(imxdmac->imxdma_channel);
+ if (cpu_is_mx21() || cpu_is_mx27()) {
+ for (i = 0; i < IMX_DMA_CHANNELS; i++)
+ free_irq(MX2x_INT_DMACH0 + i, NULL);
+ } else if (cpu_is_mx1()) {
+ free_irq(MX1_DMA_INT, NULL);
+ free_irq(MX1_DMA_ERR, NULL);
}
kfree(imxdma);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 63540d3e2153..d3e38e28bb6b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
@@ -41,6 +42,8 @@
#include <mach/dma.h>
#include <mach/hardware.h>
+#include "dmaengine.h"
+
/* SDMA registers */
#define SDMA_H_C0PTR 0x000
#define SDMA_H_INTR 0x004
@@ -259,19 +262,18 @@ struct sdma_channel {
unsigned int pc_from_device, pc_to_device;
unsigned long flags;
dma_addr_t per_address;
- u32 event_mask0, event_mask1;
- u32 watermark_level;
+ unsigned long event_mask[2];
+ unsigned long watermark_level;
u32 shp_addr, per_addr;
struct dma_chan chan;
spinlock_t lock;
struct dma_async_tx_descriptor desc;
- dma_cookie_t last_completed;
enum dma_status status;
unsigned int chn_count;
unsigned int chn_real_count;
};
-#define IMX_DMA_SG_LOOP (1 << 0)
+#define IMX_DMA_SG_LOOP BIT(0)
#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
@@ -345,9 +347,9 @@ static const struct of_device_id sdma_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
-#define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */
-#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
-#define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */
+#define SDMA_H_CONFIG_DSPDMA BIT(12) /* indicates if the DSPDMA is used */
+#define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */
+#define SDMA_H_CONFIG_ACR BIT(4) /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/
static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
@@ -362,37 +364,42 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
{
struct sdma_engine *sdma = sdmac->sdma;
int channel = sdmac->channel;
- u32 evt, mcu, dsp;
+ unsigned long evt, mcu, dsp;
if (event_override && mcu_override && dsp_override)
return -EINVAL;
- evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR);
- mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR);
- dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR);
+ evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
+ mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
+ dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
if (dsp_override)
- dsp &= ~(1 << channel);
+ __clear_bit(channel, &dsp);
else
- dsp |= (1 << channel);
+ __set_bit(channel, &dsp);
if (event_override)
- evt &= ~(1 << channel);
+ __clear_bit(channel, &evt);
else
- evt |= (1 << channel);
+ __set_bit(channel, &evt);
if (mcu_override)
- mcu &= ~(1 << channel);
+ __clear_bit(channel, &mcu);
else
- mcu |= (1 << channel);
+ __set_bit(channel, &mcu);
- __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR);
- __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR);
- __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR);
+ writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
+ writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
+ writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
return 0;
}
+static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
+{
+ writel(BIT(channel), sdma->regs + SDMA_H_START);
+}
+
/*
* sdma_run_channel - run a channel and wait till it's done
*/
@@ -404,7 +411,7 @@ static int sdma_run_channel(struct sdma_channel *sdmac)
init_completion(&sdmac->done);
- __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
+ sdma_enable_channel(sdma, channel);
ret = wait_for_completion_timeout(&sdmac->done, HZ);
@@ -451,12 +458,12 @@ static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
{
struct sdma_engine *sdma = sdmac->sdma;
int channel = sdmac->channel;
- u32 val;
+ unsigned long val;
u32 chnenbl = chnenbl_ofs(sdma, event);
- val = __raw_readl(sdma->regs + chnenbl);
- val |= (1 << channel);
- __raw_writel(val, sdma->regs + chnenbl);
+ val = readl_relaxed(sdma->regs + chnenbl);
+ __set_bit(channel, &val);
+ writel_relaxed(val, sdma->regs + chnenbl);
}
static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
@@ -464,11 +471,11 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
struct sdma_engine *sdma = sdmac->sdma;
int channel = sdmac->channel;
u32 chnenbl = chnenbl_ofs(sdma, event);
- u32 val;
+ unsigned long val;
- val = __raw_readl(sdma->regs + chnenbl);
- val &= ~(1 << channel);
- __raw_writel(val, sdma->regs + chnenbl);
+ val = readl_relaxed(sdma->regs + chnenbl);
+ __clear_bit(channel, &val);
+ writel_relaxed(val, sdma->regs + chnenbl);
}
static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
@@ -522,7 +529,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
else
sdmac->status = DMA_SUCCESS;
- sdmac->last_completed = sdmac->desc.cookie;
+ dma_cookie_complete(&sdmac->desc);
if (sdmac->desc.callback)
sdmac->desc.callback(sdmac->desc.callback_param);
}
@@ -544,10 +551,10 @@ static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
struct sdma_engine *sdma = dev_id;
- u32 stat;
+ unsigned long stat;
- stat = __raw_readl(sdma->regs + SDMA_H_INTR);
- __raw_writel(stat, sdma->regs + SDMA_H_INTR);
+ stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+ writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
while (stat) {
int channel = fls(stat) - 1;
@@ -555,7 +562,7 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
mxc_sdma_handle_channel(sdmac);
- stat &= ~(1 << channel);
+ __clear_bit(channel, &stat);
}
return IRQ_HANDLED;
@@ -663,11 +670,11 @@ static int sdma_load_context(struct sdma_channel *sdmac)
return load_address;
dev_dbg(sdma->dev, "load_address = %d\n", load_address);
- dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
+ dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
- dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
- dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
+ dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
+ dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
mutex_lock(&sdma->channel_0_lock);
@@ -677,8 +684,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
/* Send by context the event mask,base address for peripheral
* and watermark level
*/
- context->gReg[0] = sdmac->event_mask1;
- context->gReg[1] = sdmac->event_mask0;
+ context->gReg[0] = sdmac->event_mask[1];
+ context->gReg[1] = sdmac->event_mask[0];
context->gReg[2] = sdmac->per_addr;
context->gReg[6] = sdmac->shp_addr;
context->gReg[7] = sdmac->watermark_level;
@@ -701,7 +708,7 @@ static void sdma_disable_channel(struct sdma_channel *sdmac)
struct sdma_engine *sdma = sdmac->sdma;
int channel = sdmac->channel;
- __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP);
+ writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
sdmac->status = DMA_ERROR;
}
@@ -711,13 +718,13 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
sdma_disable_channel(sdmac);
- sdmac->event_mask0 = 0;
- sdmac->event_mask1 = 0;
+ sdmac->event_mask[0] = 0;
+ sdmac->event_mask[1] = 0;
sdmac->shp_addr = 0;
sdmac->per_addr = 0;
if (sdmac->event_id0) {
- if (sdmac->event_id0 > 32)
+ if (sdmac->event_id0 >= sdmac->sdma->num_events)
return -EINVAL;
sdma_event_enable(sdmac, sdmac->event_id0);
}
@@ -740,15 +747,14 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
/* Handle multiple event channels differently */
if (sdmac->event_id1) {
- sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
+ sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
if (sdmac->event_id1 > 31)
- sdmac->watermark_level |= 1 << 31;
- sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
+ __set_bit(31, &sdmac->watermark_level);
+ sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
if (sdmac->event_id0 > 31)
- sdmac->watermark_level |= 1 << 30;
+ __set_bit(30, &sdmac->watermark_level);
} else {
- sdmac->event_mask0 = 1 << sdmac->event_id0;
- sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
+ __set_bit(sdmac->event_id0, sdmac->event_mask);
}
/* Watermark Level */
sdmac->watermark_level |= sdmac->watermark_level;
@@ -774,7 +780,7 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
return -EINVAL;
}
- __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
+ writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
return 0;
}
@@ -796,8 +802,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
- clk_enable(sdma->clk);
-
sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
init_completion(&sdmac->done);
@@ -810,24 +814,6 @@ out:
return ret;
}
-static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
-{
- __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
-}
-
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
-{
- dma_cookie_t cookie = sdmac->chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- sdmac->chan.cookie = cookie;
- sdmac->desc.cookie = cookie;
-
- return cookie;
-}
-
static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
return container_of(chan, struct sdma_channel, chan);
@@ -837,14 +823,11 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
unsigned long flags;
struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
- struct sdma_engine *sdma = sdmac->sdma;
dma_cookie_t cookie;
spin_lock_irqsave(&sdmac->lock, flags);
- cookie = sdma_assign_cookie(sdmac);
-
- sdma_enable_channel(sdma, sdmac->channel);
+ cookie = dma_cookie_assign(tx);
spin_unlock_irqrestore(&sdmac->lock, flags);
@@ -875,11 +858,14 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
sdmac->peripheral_type = data->peripheral_type;
sdmac->event_id0 = data->dma_request;
- ret = sdma_set_channel_priority(sdmac, prio);
+
+ clk_enable(sdmac->sdma->clk);
+
+ ret = sdma_request_channel(sdmac);
if (ret)
return ret;
- ret = sdma_request_channel(sdmac);
+ ret = sdma_set_channel_priority(sdmac, prio);
if (ret)
return ret;
@@ -916,7 +902,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags)
+ unsigned long flags, void *context)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
@@ -1014,7 +1000,8 @@ err_out:
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_transfer_direction direction,
+ void *context)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
@@ -1128,7 +1115,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
last_used = chan->cookie;
- dma_set_tx_state(txstate, sdmac->last_completed, last_used,
+ dma_set_tx_state(txstate, chan->completed_cookie, last_used,
sdmac->chn_count - sdmac->chn_real_count);
return sdmac->status;
@@ -1136,9 +1123,11 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
static void sdma_issue_pending(struct dma_chan *chan)
{
- /*
- * Nothing to do. We only have a single descriptor
- */
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+
+ if (sdmac->status == DMA_IN_PROGRESS)
+ sdma_enable_channel(sdma, sdmac->channel);
}
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
@@ -1230,7 +1219,7 @@ static int __init sdma_init(struct sdma_engine *sdma)
clk_enable(sdma->clk);
/* Be sure SDMA has not started yet */
- __raw_writel(0, sdma->regs + SDMA_H_C0PTR);
+ writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
sdma->channel_control = dma_alloc_coherent(NULL,
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
@@ -1253,11 +1242,11 @@ static int __init sdma_init(struct sdma_engine *sdma)
/* disable all channels */
for (i = 0; i < sdma->num_events; i++)
- __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));
+ writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
/* All channels have priority 0 */
for (i = 0; i < MAX_DMA_CHANNELS; i++)
- __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
+ writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
ret = sdma_request_channel(&sdma->channel[0]);
if (ret)
@@ -1266,16 +1255,16 @@ static int __init sdma_init(struct sdma_engine *sdma)
sdma_config_ownership(&sdma->channel[0], false, true, false);
/* Set Command Channel (Channel Zero) */
- __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);
+ writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
/* Set bits of CONFIG register but with static context switching */
/* FIXME: Check whether to set ACR bit depending on clock ratios */
- __raw_writel(0, sdma->regs + SDMA_H_CONFIG);
+ writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
- __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);
+ writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
/* Set bits of CONFIG register with given context switching mode */
- __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+ writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
/* Initializes channel's priorities */
sdma_set_channel_priority(&sdma->channel[0], 7);
@@ -1367,6 +1356,7 @@ static int __init sdma_probe(struct platform_device *pdev)
spin_lock_init(&sdmac->lock);
sdmac->chan.device = &sdma->dma_device;
+ dma_cookie_init(&sdmac->chan);
sdmac->channel = i;
/*
@@ -1387,7 +1377,9 @@ static int __init sdma_probe(struct platform_device *pdev)
sdma_add_scripts(sdma, pdata->script_addrs);
if (pdata) {
- sdma_get_firmware(sdma, pdata->fw_name);
+ ret = sdma_get_firmware(sdma, pdata->fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
} else {
/*
* Because that device tree does not encode ROM script address,
@@ -1396,15 +1388,12 @@ static int __init sdma_probe(struct platform_device *pdev)
*/
ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
&fw_name);
- if (ret) {
- dev_err(&pdev->dev, "failed to get firmware name\n");
- goto err_init;
- }
-
- ret = sdma_get_firmware(sdma, fw_name);
- if (ret) {
- dev_err(&pdev->dev, "failed to get firmware\n");
- goto err_init;
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware name\n");
+ else {
+ ret = sdma_get_firmware(sdma, fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
}
}
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 74f70aadf9e4..c900ca7aaec4 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -29,6 +29,8 @@
#include <linux/intel_mid_dma.h>
#include <linux/module.h>
+#include "dmaengine.h"
+
#define MAX_CHAN 4 /*max ch across controllers*/
#include "intel_mid_dma_regs.h"
@@ -288,7 +290,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
struct intel_mid_dma_lli *llitem;
void *param_txd = NULL;
- midc->completed = txd->cookie;
+ dma_cookie_complete(txd);
callback_txd = txd->callback;
param_txd = txd->callback_param;
@@ -434,14 +436,7 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
dma_cookie_t cookie;
spin_lock_bh(&midc->lock);
- cookie = midc->chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- midc->chan.cookie = cookie;
- desc->txd.cookie = cookie;
-
+ cookie = dma_cookie_assign(tx);
if (list_empty(&midc->active_list))
list_add_tail(&desc->desc_node, &midc->active_list);
@@ -482,31 +477,18 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
- int ret;
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+ enum dma_status ret;
- last_complete = midc->completed;
- last_used = chan->cookie;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS) {
spin_lock_bh(&midc->lock);
midc_scan_descriptors(to_middma_device(chan->device), midc);
spin_unlock_bh(&midc->lock);
- last_complete = midc->completed;
- last_used = chan->cookie;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ ret = dma_cookie_status(chan, cookie, txstate);
}
- if (txstate) {
- txstate->last = last_complete;
- txstate->used = last_used;
- txstate->residue = 0;
- }
return ret;
}
@@ -732,13 +714,14 @@ err_desc_get:
* @sg_len: length of sg txn
* @direction: DMA transfer dirtn
* @flags: DMA flags
+ * @context: transfer context (ignored)
*
* Prepares LLI based periphral transfer
*/
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags)
+ unsigned long flags, void *context)
{
struct intel_mid_dma_chan *midc = NULL;
struct intel_mid_dma_slave *mids = NULL;
@@ -832,7 +815,6 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
/*trying to free ch in use!!!!!*/
pr_err("ERR_MDMA: trying to free ch in use\n");
}
- pm_runtime_put(&mid->pdev->dev);
spin_lock_bh(&midc->lock);
midc->descs_allocated = 0;
list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
@@ -853,6 +835,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
/* Disable CH interrupts */
iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
+ pm_runtime_put(&mid->pdev->dev);
}
/**
@@ -886,7 +869,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
pm_runtime_put(&mid->pdev->dev);
return -EIO;
}
- midc->completed = chan->cookie = 1;
+ dma_cookie_init(chan);
spin_lock_bh(&midc->lock);
while (midc->descs_allocated < DESCS_PER_CHANNEL) {
@@ -1056,7 +1039,8 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
}
err_status &= mid->intr_mask;
if (err_status) {
- iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
+ iowrite32((err_status << INT_MASK_WE),
+ mid->dma_base + MASK_ERR);
call_tasklet = 1;
}
if (call_tasklet)
@@ -1118,7 +1102,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
struct intel_mid_dma_chan *midch = &dma->ch[i];
midch->chan.device = &dma->common;
- midch->chan.cookie = 1;
+ dma_cookie_init(&midch->chan);
midch->ch_id = dma->chan_base + i;
pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index c83d35b97bd8..1bfa9268feaf 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -165,7 +165,6 @@ union intel_mid_dma_cfg_hi {
* @dma_base: MMIO register space DMA engine base pointer
* @ch_id: DMA channel id
* @lock: channel spinlock
- * @completed: DMA cookie
* @active_list: current active descriptors
* @queue: current queued up descriptors
* @free_list: current free descriptors
@@ -183,7 +182,6 @@ struct intel_mid_dma_chan {
void __iomem *dma_base;
int ch_id;
spinlock_t lock;
- dma_cookie_t completed;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index a4d6cb0c0343..31493d80e0e9 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -40,6 +40,8 @@
#include "registers.h"
#include "hw.h"
+#include "../dmaengine.h"
+
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
@@ -107,6 +109,7 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
chan->reg_base = device->reg_base + (0x80 * (idx + 1));
spin_lock_init(&chan->cleanup_lock);
chan->common.device = dma;
+ dma_cookie_init(&chan->common);
list_add_tail(&chan->common.device_node, &dma->channels);
device->idx[idx] = chan;
init_timer(&chan->timer);
@@ -235,12 +238,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_bh(&ioat->desc_lock);
/* cookie incr and addition to used_list must be atomic */
- cookie = c->cookie;
- cookie++;
- if (cookie < 0)
- cookie = 1;
- c->cookie = cookie;
- tx->cookie = cookie;
+ cookie = dma_cookie_assign(tx);
dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
/* write address into NextDescriptor field of last desc in chain */
@@ -603,8 +601,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
*/
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
- chan->completed_cookie = tx->cookie;
- tx->cookie = 0;
+ dma_cookie_complete(tx);
ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
ioat->active -= desc->hw->tx_cnt;
if (tx->callback) {
@@ -733,13 +730,15 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
{
struct ioat_chan_common *chan = to_chan_common(c);
struct ioatdma_device *device = chan->device;
+ enum dma_status ret;
- if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
- return DMA_SUCCESS;
+ ret = dma_cookie_status(c, cookie, txstate);
+ if (ret == DMA_SUCCESS)
+ return ret;
device->cleanup_fn((unsigned long) c);
- return ioat_tx_status(c, cookie, txstate);
+ return dma_cookie_status(c, cookie, txstate);
}
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5216c8a92a21..c7888bccd974 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -90,7 +90,6 @@ struct ioat_chan_common {
void __iomem *reg_base;
unsigned long last_completion;
spinlock_t cleanup_lock;
- dma_cookie_t completed_cookie;
unsigned long state;
#define IOAT_COMPLETION_PENDING 0
#define IOAT_COMPLETION_ACK 1
@@ -143,28 +142,6 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
return container_of(chan, struct ioat_dma_chan, base);
}
-/**
- * ioat_tx_status - poll the status of an ioat transaction
- * @c: channel handle
- * @cookie: transaction identifier
- * @txstate: if set, updated with the transaction state
- */
-static inline enum dma_status
-ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
-{
- struct ioat_chan_common *chan = to_chan_common(c);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
-
- last_used = c->cookie;
- last_complete = chan->completed_cookie;
-
- dma_set_tx_state(txstate, last_complete, last_used, 0);
-
- return dma_async_is_complete(cookie, last_complete, last_used);
-}
-
/* wrapper around hardware descriptor format + additional software fields */
/**
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d65f8377971..e8e110ff3d96 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -41,6 +41,8 @@
#include "registers.h"
#include "hw.h"
+#include "../dmaengine.h"
+
int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
@@ -147,8 +149,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
- chan->completed_cookie = tx->cookie;
- tx->cookie = 0;
+ dma_cookie_complete(tx);
if (tx->callback) {
tx->callback(tx->callback_param);
tx->callback = NULL;
@@ -398,13 +399,9 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
struct dma_chan *c = tx->chan;
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
- dma_cookie_t cookie = c->cookie;
+ dma_cookie_t cookie;
- cookie++;
- if (cookie < 0)
- cookie = 1;
- tx->cookie = cookie;
- c->cookie = cookie;
+ cookie = dma_cookie_assign(tx);
dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index f519c93a61e7..2c4476c0e405 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -61,6 +61,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
+#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"
@@ -277,9 +278,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
dump_desc_dbg(ioat, desc);
tx = &desc->txd;
if (tx->cookie) {
- chan->completed_cookie = tx->cookie;
+ dma_cookie_complete(tx);
ioat3_dma_unmap(ioat, desc, idx + i);
- tx->cookie = 0;
if (tx->callback) {
tx->callback(tx->callback_param);
tx->callback = NULL;
@@ -411,13 +411,15 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ enum dma_status ret;
- if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
- return DMA_SUCCESS;
+ ret = dma_cookie_status(c, cookie, txstate);
+ if (ret == DMA_SUCCESS)
+ return ret;
ioat3_cleanup(ioat);
- return ioat_tx_status(c, cookie, txstate);
+ return dma_cookie_status(c, cookie, txstate);
}
static struct dma_async_tx_descriptor *
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index faf88b7e1e71..da6c4c2c066a 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -36,6 +36,8 @@
#include <mach/adma.h>
+#include "dmaengine.h"
+
#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
container_of(dev, struct iop_adma_device, common)
@@ -317,7 +319,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
}
if (cookie > 0) {
- iop_chan->completed_cookie = cookie;
+ iop_chan->common.completed_cookie = cookie;
pr_debug("\tcompleted cookie %d\n", cookie);
}
}
@@ -438,18 +440,6 @@ retry:
return NULL;
}
-static dma_cookie_t
-iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
- struct iop_adma_desc_slot *desc)
-{
- dma_cookie_t cookie = iop_chan->common.cookie;
- cookie++;
- if (cookie < 0)
- cookie = 1;
- iop_chan->common.cookie = desc->async_tx.cookie = cookie;
- return cookie;
-}
-
static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
{
dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
@@ -477,7 +467,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
slots_per_op = grp_start->slots_per_op;
spin_lock_bh(&iop_chan->lock);
- cookie = iop_desc_assign_cookie(iop_chan, sw_desc);
+ cookie = dma_cookie_assign(tx);
old_chain_tail = list_entry(iop_chan->chain.prev,
struct iop_adma_desc_slot, chain_node);
@@ -904,24 +894,15 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
- enum dma_status ret;
-
- last_used = chan->cookie;
- last_complete = iop_chan->completed_cookie;
- dma_set_tx_state(txstate, last_complete, last_used, 0);
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ int ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS)
return ret;
iop_adma_slot_cleanup(iop_chan);
- last_used = chan->cookie;
- last_complete = iop_chan->completed_cookie;
- dma_set_tx_state(txstate, last_complete, last_used, 0);
-
- return dma_async_is_complete(cookie, last_complete, last_used);
+ return dma_cookie_status(chan, cookie, txstate);
}
static irqreturn_t iop_adma_eot_handler(int irq, void *data)
@@ -1565,6 +1546,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&iop_chan->chain);
INIT_LIST_HEAD(&iop_chan->all_slots);
iop_chan->common.device = dma_dev;
+ dma_cookie_init(&iop_chan->common);
list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
@@ -1642,16 +1624,12 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
iop_desc_set_dest_addr(grp_start, iop_chan, 0);
iop_desc_set_memcpy_src_addr(grp_start, 0);
- cookie = iop_chan->common.cookie;
- cookie++;
- if (cookie <= 1)
- cookie = 2;
+ cookie = dma_cookie_assign(&sw_desc->async_tx);
/* initialize the completed cookie to be less than
* the most recently used cookie
*/
- iop_chan->completed_cookie = cookie - 1;
- iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
+ iop_chan->common.completed_cookie = cookie - 1;
/* channel should not be busy */
BUG_ON(iop_chan_is_busy(iop_chan));
@@ -1699,16 +1677,12 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
iop_desc_set_xor_src_addr(grp_start, 0, 0);
iop_desc_set_xor_src_addr(grp_start, 1, 0);
- cookie = iop_chan->common.cookie;
- cookie++;
- if (cookie <= 1)
- cookie = 2;
+ cookie = dma_cookie_assign(&sw_desc->async_tx);
/* initialize the completed cookie to be less than
* the most recently used cookie
*/
- iop_chan->completed_cookie = cookie - 1;
- iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
+ iop_chan->common.completed_cookie = cookie - 1;
/* channel should not be busy */
BUG_ON(iop_chan_is_busy(iop_chan));
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 6212b16e8cf2..62e3f8ec2461 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -25,6 +25,7 @@
#include <mach/ipu.h>
+#include "../dmaengine.h"
#include "ipu_intern.h"
#define FS_VF_IN_VALID 0x00000002
@@ -866,14 +867,7 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
- cookie = ichan->dma_chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- /* from dmaengine.h: "last cookie value returned to client" */
- ichan->dma_chan.cookie = cookie;
- tx->cookie = cookie;
+ cookie = dma_cookie_assign(tx);
/* ipu->lock can be taken under ichan->lock, but not v.v. */
spin_lock_irqsave(&ichan->lock, flags);
@@ -1295,7 +1289,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
/* Flip the active buffer - even if update above failed */
ichan->active_buffer = !ichan->active_buffer;
if (done)
- ichan->completed = desc->txd.cookie;
+ dma_cookie_complete(&desc->txd);
callback = desc->txd.callback;
callback_param = desc->txd.callback_param;
@@ -1341,7 +1335,8 @@ static void ipu_gc_tasklet(unsigned long arg)
/* Allocate and initialise a transfer descriptor. */
static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long tx_flags)
+ enum dma_transfer_direction direction, unsigned long tx_flags,
+ void *context)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac_tx_desc *desc = NULL;
@@ -1510,8 +1505,7 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan)
BUG_ON(chan->client_count > 1);
WARN_ON(ichan->status != IPU_CHANNEL_FREE);
- chan->cookie = 1;
- ichan->completed = -ENXIO;
+ dma_cookie_init(chan);
ret = ipu_irq_map(chan->chan_id);
if (ret < 0)
@@ -1600,9 +1594,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
static enum dma_status idmac_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
- struct idmac_channel *ichan = to_idmac_chan(chan);
-
- dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0);
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
if (cookie != chan->cookie)
return DMA_ERROR;
return DMA_SUCCESS;
@@ -1638,11 +1630,10 @@ static int __init ipu_idmac_init(struct ipu *ipu)
ichan->status = IPU_CHANNEL_FREE;
ichan->sec_chan_en = false;
- ichan->completed = -ENXIO;
snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
dma_chan->device = &idmac->dma;
- dma_chan->cookie = 1;
+ dma_cookie_init(dma_chan);
dma_chan->chan_id = i;
list_add_tail(&dma_chan->device_node, &dma->channels);
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4d6d4cf66949..2ab0a3d0eed5 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -44,6 +44,8 @@
#include <linux/random.h>
+#include "dmaengine.h"
+
/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS 64
@@ -188,7 +190,6 @@ struct mpc_dma_chan {
struct list_head completed;
struct mpc_dma_tcd *tcd;
dma_addr_t tcd_paddr;
- dma_cookie_t completed_cookie;
/* Lock for this structure */
spinlock_t lock;
@@ -365,7 +366,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma)
/* Free descriptors */
spin_lock_irqsave(&mchan->lock, flags);
list_splice_tail_init(&list, &mchan->free);
- mchan->completed_cookie = last_cookie;
+ mchan->chan.completed_cookie = last_cookie;
spin_unlock_irqrestore(&mchan->lock, flags);
}
}
@@ -438,13 +439,7 @@ static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
mpc_dma_execute(mchan);
/* Update cookie */
- cookie = mchan->chan.cookie + 1;
- if (cookie <= 0)
- cookie = 1;
-
- mchan->chan.cookie = cookie;
- mdesc->desc.cookie = cookie;
-
+ cookie = dma_cookie_assign(txd);
spin_unlock_irqrestore(&mchan->lock, flags);
return cookie;
@@ -562,17 +557,14 @@ mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ enum dma_status ret;
unsigned long flags;
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
spin_lock_irqsave(&mchan->lock, flags);
- last_used = mchan->chan.cookie;
- last_complete = mchan->completed_cookie;
+ ret = dma_cookie_status(chan, cookie, txstate);
spin_unlock_irqrestore(&mchan->lock, flags);
- dma_set_tx_state(txstate, last_complete, last_used, 0);
- return dma_async_is_complete(cookie, last_complete, last_used);
+ return ret;
}
/* Prepare descriptor for memory to memory copy */
@@ -741,8 +733,7 @@ static int __devinit mpc_dma_probe(struct platform_device *op)
mchan = &mdma->channels[i];
mchan->chan.device = dma;
- mchan->chan.cookie = 1;
- mchan->completed_cookie = mchan->chan.cookie;
+ dma_cookie_init(&mchan->chan);
INIT_LIST_HEAD(&mchan->free);
INIT_LIST_HEAD(&mchan->prepared);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e779b434af45..fa5d55fea46c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -26,6 +26,8 @@
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <plat/mv_xor.h>
+
+#include "dmaengine.h"
#include "mv_xor.h"
static void mv_xor_issue_pending(struct dma_chan *chan);
@@ -435,7 +437,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
}
if (cookie > 0)
- mv_chan->completed_cookie = cookie;
+ mv_chan->common.completed_cookie = cookie;
}
static void
@@ -534,18 +536,6 @@ retry:
return NULL;
}
-static dma_cookie_t
-mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
- struct mv_xor_desc_slot *desc)
-{
- dma_cookie_t cookie = mv_chan->common.cookie;
-
- if (++cookie < 0)
- cookie = 1;
- mv_chan->common.cookie = desc->async_tx.cookie = cookie;
- return cookie;
-}
-
/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -563,7 +553,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
grp_start = sw_desc->group_head;
spin_lock_bh(&mv_chan->lock);
- cookie = mv_desc_assign_cookie(mv_chan, sw_desc);
+ cookie = dma_cookie_assign(tx);
if (list_empty(&mv_chan->chain))
list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
@@ -820,27 +810,16 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
enum dma_status ret;
- last_used = chan->cookie;
- last_complete = mv_chan->completed_cookie;
- mv_chan->is_complete_cookie = cookie;
- dma_set_tx_state(txstate, last_complete, last_used, 0);
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS) {
mv_xor_clean_completed_slots(mv_chan);
return ret;
}
mv_xor_slot_cleanup(mv_chan);
- last_used = chan->cookie;
- last_complete = mv_chan->completed_cookie;
-
- dma_set_tx_state(txstate, last_complete, last_used, 0);
- return dma_async_is_complete(cookie, last_complete, last_used);
+ return dma_cookie_status(chan, cookie, txstate);
}
static void mv_dump_xor_regs(struct mv_xor_chan *chan)
@@ -1214,6 +1193,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&mv_chan->completed_slots);
INIT_LIST_HEAD(&mv_chan->all_slots);
mv_chan->common.device = dma_dev;
+ dma_cookie_init(&mv_chan->common);
list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 977b592e976b..654876b7ba1d 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -78,7 +78,6 @@ struct mv_xor_device {
/**
* struct mv_xor_chan - internal representation of a XOR channel
* @pending: allows batching of hardware operations
- * @completed_cookie: identifier for the most recently completed operation
* @lock: serializes enqueue/dequeue operations to the descriptors pool
* @mmr_base: memory mapped register base
* @idx: the index of the xor channel
@@ -93,7 +92,6 @@ struct mv_xor_device {
*/
struct mv_xor_chan {
int pending;
- dma_cookie_t completed_cookie;
spinlock_t lock; /* protects the descriptor slot pool */
void __iomem *mmr_base;
unsigned int idx;
@@ -109,7 +107,6 @@ struct mv_xor_chan {
#ifdef USE_TIMER
unsigned long cleanup_time;
u32 current_on_last_cleanup;
- dma_cookie_t is_complete_cookie;
#endif
};
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index b06cd4ca626f..c81ef7e10e08 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -22,12 +22,14 @@
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
+#include <linux/fsl/mxs-dma.h>
#include <asm/irq.h>
#include <mach/mxs.h>
-#include <mach/dma.h>
#include <mach/common.h>
+#include "dmaengine.h"
+
/*
* NOTE: The term "PIO" throughout the mxs-dma implementation means
* PIO mode of mxs apbh-dma and apbx-dma. With this working mode,
@@ -111,7 +113,6 @@ struct mxs_dma_chan {
struct mxs_dma_ccw *ccw;
dma_addr_t ccw_phys;
int desc_count;
- dma_cookie_t last_completed;
enum dma_status status;
unsigned int flags;
#define MXS_DMA_SG_LOOP (1 << 0)
@@ -193,19 +194,6 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
mxs_chan->status = DMA_IN_PROGRESS;
}
-static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan)
-{
- dma_cookie_t cookie = mxs_chan->chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- mxs_chan->chan.cookie = cookie;
- mxs_chan->desc.cookie = cookie;
-
- return cookie;
-}
-
static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
return container_of(chan, struct mxs_dma_chan, chan);
@@ -217,7 +205,7 @@ static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
mxs_dma_enable_chan(mxs_chan);
- return mxs_dma_assign_cookie(mxs_chan);
+ return dma_cookie_assign(tx);
}
static void mxs_dma_tasklet(unsigned long data)
@@ -274,7 +262,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
stat1 &= ~(1 << channel);
if (mxs_chan->status == DMA_SUCCESS)
- mxs_chan->last_completed = mxs_chan->desc.cookie;
+ dma_cookie_complete(&mxs_chan->desc);
/* schedule tasklet on this channel */
tasklet_schedule(&mxs_chan->tasklet);
@@ -349,10 +337,32 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
clk_disable_unprepare(mxs_dma->clk);
}
+/*
+ * How to use the flags for ->device_prep_slave_sg() :
+ * [1] If there is only one DMA command in the DMA chain, the code should be:
+ * ......
+ * ->device_prep_slave_sg(DMA_CTRL_ACK);
+ * ......
+ * [2] If there are two DMA commands in the DMA chain, the code should be
+ * ......
+ * ->device_prep_slave_sg(0);
+ * ......
+ * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ * ......
+ * [3] If there are more than two DMA commands in the DMA chain, the code
+ * should be:
+ * ......
+ * ->device_prep_slave_sg(0); // First
+ * ......
+ * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
+ * ......
+ * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
+ * ......
+ */
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long append)
+ unsigned long flags, void *context)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -360,6 +370,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct scatterlist *sg;
int i, j;
u32 *pio;
+ bool append = flags & DMA_PREP_INTERRUPT;
int idx = append ? mxs_chan->desc_count : 0;
if (mxs_chan->status == DMA_IN_PROGRESS && !append)
@@ -386,7 +397,6 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits |= CCW_CHAIN;
ccw->bits &= ~CCW_IRQ;
ccw->bits &= ~CCW_DEC_SEM;
- ccw->bits &= ~CCW_WAIT4END;
} else {
idx = 0;
}
@@ -401,7 +411,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits = 0;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_DEC_SEM;
- ccw->bits |= CCW_WAIT4END;
+ if (flags & DMA_CTRL_ACK)
+ ccw->bits |= CCW_WAIT4END;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
ccw->bits |= BF_CCW(sg_len, PIO_NUM);
@@ -432,7 +443,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits &= ~CCW_CHAIN;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_DEC_SEM;
- ccw->bits |= CCW_WAIT4END;
+ if (flags & DMA_CTRL_ACK)
+ ccw->bits |= CCW_WAIT4END;
}
}
}
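
A minimal sketch of the two-command convention described in the comment at the top of this hunk (case [2]): the first prep call passes no flags, the final one passes DMA_PREP_INTERRUPT | DMA_CTRL_ACK so that the IRQ bits and CCW_WAIT4END are applied only to the last command. The channel, the scatterlists and the use of the generic dmaengine_prep_slave_sg() helper are assumptions for illustration, not taken from this patch.

#include <linux/dmaengine.h>

/* Illustrative only: chain two slave_sg commands on an mxs-dma channel
 * following the flags convention documented above.  "chan", "cmd_sgl"
 * and "data_sgl" are assumed to be set up and mapped by the caller.
 */
static int example_two_cmd_chain(struct dma_chan *chan,
				 struct scatterlist *cmd_sgl, unsigned int cmd_len,
				 struct scatterlist *data_sgl, unsigned int data_len)
{
	struct dma_async_tx_descriptor *desc;

	/* First command of the chain: no flags set. */
	desc = dmaengine_prep_slave_sg(chan, cmd_sgl, cmd_len,
				       DMA_MEM_TO_DEV, 0);
	if (!desc)
		return -EINVAL;

	/* Last command: appended via DMA_PREP_INTERRUPT; DMA_CTRL_ACK
	 * makes the driver code above set CCW_WAIT4END on it. */
	desc = dmaengine_prep_slave_sg(chan, data_sgl, data_len,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}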
@@ -447,7 +459,8 @@ err_out:
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_transfer_direction direction,
+ void *context)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -538,7 +551,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
dma_cookie_t last_used;
last_used = chan->cookie;
- dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0);
+ dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
return mxs_chan->status;
}
@@ -630,6 +643,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
mxs_chan->mxs_dma = mxs_dma;
mxs_chan->chan.device = &mxs_dma->dma_device;
+ dma_cookie_init(&mxs_chan->chan);
tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
(unsigned long) mxs_chan);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 823f58179f9d..65c0495a6d40 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -25,6 +25,8 @@
#include <linux/module.h>
#include <linux/pch_dma.h>
+#include "dmaengine.h"
+
#define DRV_NAME "pch-dma"
#define DMA_CTL0_DISABLE 0x0
@@ -105,7 +107,6 @@ struct pch_dma_chan {
spinlock_t lock;
- dma_cookie_t completed_cookie;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
@@ -416,20 +417,6 @@ static void pdc_advance_work(struct pch_dma_chan *pd_chan)
}
}
-static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
- struct pch_dma_desc *desc)
-{
- dma_cookie_t cookie = pd_chan->chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- pd_chan->chan.cookie = cookie;
- desc->txd.cookie = cookie;
-
- return cookie;
-}
-
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
struct pch_dma_desc *desc = to_pd_desc(txd);
@@ -437,7 +424,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
dma_cookie_t cookie;
spin_lock(&pd_chan->lock);
- cookie = pdc_assign_cookie(pd_chan, desc);
+ cookie = dma_cookie_assign(txd);
if (list_empty(&pd_chan->active_list)) {
list_add_tail(&desc->desc_node, &pd_chan->active_list);
@@ -544,7 +531,7 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
spin_lock_irq(&pd_chan->lock);
list_splice(&tmp_list, &pd_chan->free_list);
pd_chan->descs_allocated = i;
- pd_chan->completed_cookie = chan->cookie = 1;
+ dma_cookie_init(chan);
spin_unlock_irq(&pd_chan->lock);
pdc_enable_irq(chan, 1);
@@ -578,19 +565,12 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_completed;
- int ret;
+ enum dma_status ret;
spin_lock_irq(&pd_chan->lock);
- last_completed = pd_chan->completed_cookie;
- last_used = chan->cookie;
+ ret = dma_cookie_status(chan, cookie, txstate);
spin_unlock_irq(&pd_chan->lock);
- ret = dma_async_is_complete(cookie, last_completed, last_used);
-
- dma_set_tx_state(txstate, last_completed, last_used, 0);
-
return ret;
}
@@ -607,7 +587,8 @@ static void pd_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
{
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
struct pch_dma_slave *pd_slave = chan->private;
@@ -932,7 +913,7 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
struct pch_dma_chan *pd_chan = &pd->channels[i];
pd_chan->chan.device = &pd->dma;
- pd_chan->chan.cookie = 1;
+ dma_cookie_init(&pd_chan->chan);
pd_chan->membase = &regs->desc[i];
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 16b66c827f19..282caf118be8 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1,4 +1,6 @@
-/* linux/drivers/dma/pl330.c
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* Copyright (C) 2010 Samsung Electronics Co. Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
@@ -9,10 +11,15 @@
* (at your option) any later version.
*/
+#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
@@ -21,8 +28,497 @@
#include <linux/scatterlist.h>
#include <linux/of.h>
+#include "dmaengine.h"
+#define PL330_MAX_CHAN 8
+#define PL330_MAX_IRQS 32
+#define PL330_MAX_PERI 32
+
+enum pl330_srccachectrl {
+ SCCTRL0, /* Noncacheable and nonbufferable */
+ SCCTRL1, /* Bufferable only */
+ SCCTRL2, /* Cacheable, but do not allocate */
+ SCCTRL3, /* Cacheable and bufferable, but do not allocate */
+ SINVALID1,
+ SINVALID2,
+ SCCTRL6, /* Cacheable write-through, allocate on reads only */
+ SCCTRL7, /* Cacheable write-back, allocate on reads only */
+};
+
+enum pl330_dstcachectrl {
+ DCCTRL0, /* Noncacheable and nonbufferable */
+ DCCTRL1, /* Bufferable only */
+ DCCTRL2, /* Cacheable, but do not allocate */
+ DCCTRL3, /* Cacheable and bufferable, but do not allocate */
+ DINVALID1, /* AWCACHE = 0x1000 */
+ DINVALID2,
+ DCCTRL6, /* Cacheable write-through, allocate on writes only */
+ DCCTRL7, /* Cacheable write-back, allocate on writes only */
+};
+
+enum pl330_byteswap {
+ SWAP_NO,
+ SWAP_2,
+ SWAP_4,
+ SWAP_8,
+ SWAP_16,
+};
+
+enum pl330_reqtype {
+ MEMTOMEM,
+ MEMTODEV,
+ DEVTOMEM,
+ DEVTODEV,
+};
+
+/* Register and Bit field Definitions */
+#define DS 0x0
+#define DS_ST_STOP 0x0
+#define DS_ST_EXEC 0x1
+#define DS_ST_CMISS 0x2
+#define DS_ST_UPDTPC 0x3
+#define DS_ST_WFE 0x4
+#define DS_ST_ATBRR 0x5
+#define DS_ST_QBUSY 0x6
+#define DS_ST_WFP 0x7
+#define DS_ST_KILL 0x8
+#define DS_ST_CMPLT 0x9
+#define DS_ST_FLTCMP 0xe
+#define DS_ST_FAULT 0xf
+
+#define DPC 0x4
+#define INTEN 0x20
+#define ES 0x24
+#define INTSTATUS 0x28
+#define INTCLR 0x2c
+#define FSM 0x30
+#define FSC 0x34
+#define FTM 0x38
+
+#define _FTC 0x40
+#define FTC(n) (_FTC + (n)*0x4)
+
+#define _CS 0x100
+#define CS(n) (_CS + (n)*0x8)
+#define CS_CNS (1 << 21)
+
+#define _CPC 0x104
+#define CPC(n) (_CPC + (n)*0x8)
+
+#define _SA 0x400
+#define SA(n) (_SA + (n)*0x20)
+
+#define _DA 0x404
+#define DA(n) (_DA + (n)*0x20)
+
+#define _CC 0x408
+#define CC(n) (_CC + (n)*0x20)
+
+#define CC_SRCINC (1 << 0)
+#define CC_DSTINC (1 << 14)
+#define CC_SRCPRI (1 << 8)
+#define CC_DSTPRI (1 << 22)
+#define CC_SRCNS (1 << 9)
+#define CC_DSTNS (1 << 23)
+#define CC_SRCIA (1 << 10)
+#define CC_DSTIA (1 << 24)
+#define CC_SRCBRSTLEN_SHFT 4
+#define CC_DSTBRSTLEN_SHFT 18
+#define CC_SRCBRSTSIZE_SHFT 1
+#define CC_DSTBRSTSIZE_SHFT 15
+#define CC_SRCCCTRL_SHFT 11
+#define CC_SRCCCTRL_MASK 0x7
+#define CC_DSTCCTRL_SHFT 25
+#define CC_DRCCCTRL_MASK 0x7
+#define CC_SWAP_SHFT 28
+
+#define _LC0 0x40c
+#define LC0(n) (_LC0 + (n)*0x20)
+
+#define _LC1 0x410
+#define LC1(n) (_LC1 + (n)*0x20)
+
+#define DBGSTATUS 0xd00
+#define DBG_BUSY (1 << 0)
+
+#define DBGCMD 0xd04
+#define DBGINST0 0xd08
+#define DBGINST1 0xd0c
+
+#define CR0 0xe00
+#define CR1 0xe04
+#define CR2 0xe08
+#define CR3 0xe0c
+#define CR4 0xe10
+#define CRD 0xe14
+
+#define PERIPH_ID 0xfe0
+#define PERIPH_REV_SHIFT 20
+#define PERIPH_REV_MASK 0xf
+#define PERIPH_REV_R0P0 0
+#define PERIPH_REV_R1P0 1
+#define PERIPH_REV_R1P1 2
+#define PCELL_ID 0xff0
+
+#define CR0_PERIPH_REQ_SET (1 << 0)
+#define CR0_BOOT_EN_SET (1 << 1)
+#define CR0_BOOT_MAN_NS (1 << 2)
+#define CR0_NUM_CHANS_SHIFT 4
+#define CR0_NUM_CHANS_MASK 0x7
+#define CR0_NUM_PERIPH_SHIFT 12
+#define CR0_NUM_PERIPH_MASK 0x1f
+#define CR0_NUM_EVENTS_SHIFT 17
+#define CR0_NUM_EVENTS_MASK 0x1f
+
+#define CR1_ICACHE_LEN_SHIFT 0
+#define CR1_ICACHE_LEN_MASK 0x7
+#define CR1_NUM_ICACHELINES_SHIFT 4
+#define CR1_NUM_ICACHELINES_MASK 0xf
+
+#define CRD_DATA_WIDTH_SHIFT 0
+#define CRD_DATA_WIDTH_MASK 0x7
+#define CRD_WR_CAP_SHIFT 4
+#define CRD_WR_CAP_MASK 0x7
+#define CRD_WR_Q_DEP_SHIFT 8
+#define CRD_WR_Q_DEP_MASK 0xf
+#define CRD_RD_CAP_SHIFT 12
+#define CRD_RD_CAP_MASK 0x7
+#define CRD_RD_Q_DEP_SHIFT 16
+#define CRD_RD_Q_DEP_MASK 0xf
+#define CRD_DATA_BUFF_SHIFT 20
+#define CRD_DATA_BUFF_MASK 0x3ff
+
+#define PART 0x330
+#define DESIGNER 0x41
+#define REVISION 0x0
+#define INTEG_CFG 0x0
+#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
+
+#define PCELL_ID_VAL 0xb105f00d
+
+#define PL330_STATE_STOPPED (1 << 0)
+#define PL330_STATE_EXECUTING (1 << 1)
+#define PL330_STATE_WFE (1 << 2)
+#define PL330_STATE_FAULTING (1 << 3)
+#define PL330_STATE_COMPLETING (1 << 4)
+#define PL330_STATE_WFP (1 << 5)
+#define PL330_STATE_KILLING (1 << 6)
+#define PL330_STATE_FAULT_COMPLETING (1 << 7)
+#define PL330_STATE_CACHEMISS (1 << 8)
+#define PL330_STATE_UPDTPC (1 << 9)
+#define PL330_STATE_ATBARRIER (1 << 10)
+#define PL330_STATE_QUEUEBUSY (1 << 11)
+#define PL330_STATE_INVALID (1 << 15)
+
+#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
+ | PL330_STATE_WFE | PL330_STATE_FAULTING)
+
+#define CMD_DMAADDH 0x54
+#define CMD_DMAEND 0x00
+#define CMD_DMAFLUSHP 0x35
+#define CMD_DMAGO 0xa0
+#define CMD_DMALD 0x04
+#define CMD_DMALDP 0x25
+#define CMD_DMALP 0x20
+#define CMD_DMALPEND 0x28
+#define CMD_DMAKILL 0x01
+#define CMD_DMAMOV 0xbc
+#define CMD_DMANOP 0x18
+#define CMD_DMARMB 0x12
+#define CMD_DMASEV 0x34
+#define CMD_DMAST 0x08
+#define CMD_DMASTP 0x29
+#define CMD_DMASTZ 0x0c
+#define CMD_DMAWFE 0x36
+#define CMD_DMAWFP 0x30
+#define CMD_DMAWMB 0x13
+
+#define SZ_DMAADDH 3
+#define SZ_DMAEND 1
+#define SZ_DMAFLUSHP 2
+#define SZ_DMALD 1
+#define SZ_DMALDP 2
+#define SZ_DMALP 2
+#define SZ_DMALPEND 2
+#define SZ_DMAKILL 1
+#define SZ_DMAMOV 6
+#define SZ_DMANOP 1
+#define SZ_DMARMB 1
+#define SZ_DMASEV 2
+#define SZ_DMAST 1
+#define SZ_DMASTP 2
+#define SZ_DMASTZ 1
+#define SZ_DMAWFE 2
+#define SZ_DMAWFP 2
+#define SZ_DMAWMB 1
+#define SZ_DMAGO 6
+
+#define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
+#define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))
+
+#define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
+#define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
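
A small worked example of the burst conversion helpers above, assuming a CCR whose source burst-size field is 2 and source burst-length field is 7; the function name is made up for illustration.

/* Illustrative only: brst_size field = 2 gives 1 << 2 = 4 bytes/beat,
 * brst_len field = 7 gives 7 + 1 = 8 beats/burst, so 1024 bytes
 * convert to 1024 / 4 / 8 = 32 bursts (and 32 bursts back to 1024).
 */
static u32 example_bytes_to_bursts(void)
{
	u32 ccr = (2 << CC_SRCBRSTSIZE_SHFT) | (7 << CC_SRCBRSTLEN_SHFT);

	return BYTE_TO_BURST(1024, ccr);	/* evaluates to 32 */
}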
+
+/*
+ * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
+ * at 1byte/burst for P<->M and M<->M respectively.
+ * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
+ * should be enough for P<->M and M<->M respectively.
+ */
+#define MCODE_BUFF_PER_REQ 256
+
+/* If the _pl330_req is available to the client */
+#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
+
+/* Use this _only_ to wait on transient states */
+#define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax();
+
+#ifdef PL330_DEBUG_MCGEN
+static unsigned cmd_line;
+#define PL330_DBGCMD_DUMP(off, x...) do { \
+ printk("%x:", cmd_line); \
+ printk(x); \
+ cmd_line += off; \
+ } while (0)
+#define PL330_DBGMC_START(addr) (cmd_line = addr)
+#else
+#define PL330_DBGCMD_DUMP(off, x...) do {} while (0)
+#define PL330_DBGMC_START(addr) do {} while (0)
+#endif
+
+/* The number of default descriptors */
+
#define NR_DEFAULT_DESC 16
+/* Populated by the PL330 core driver for DMA API driver's info */
+struct pl330_config {
+ u32 periph_id;
+ u32 pcell_id;
+#define DMAC_MODE_NS (1 << 0)
+ unsigned int mode;
+ unsigned int data_bus_width:10; /* In number of bits */
+ unsigned int data_buf_dep:10;
+ unsigned int num_chan:4;
+ unsigned int num_peri:6;
+ u32 peri_ns;
+ unsigned int num_events:6;
+ u32 irq_ns;
+};
+
+/* Handle to the DMAC provided to the PL330 core */
+struct pl330_info {
+ /* Owning device */
+ struct device *dev;
+ /* Size of MicroCode buffers for each channel. */
+ unsigned mcbufsz;
+ /* ioremap'ed address of PL330 registers. */
+ void __iomem *base;
+ /* Client can freely use it. */
+ void *client_data;
+ /* PL330 core data, Client must not touch it. */
+ void *pl330_data;
+ /* Populated by the PL330 core driver during pl330_add */
+ struct pl330_config pcfg;
+ /*
+ * If the DMAC has some reset mechanism, then the
+ * client may want to provide pointer to the method.
+ */
+ void (*dmac_reset)(struct pl330_info *pi);
+};
+
+/**
+ * Request Configuration.
+ * The PL330 core does not modify this and uses the last
+ * working configuration if the request doesn't provide any.
+ *
+ * The Client may want to provide this info only for the
+ * first request and a request with new settings.
+ */
+struct pl330_reqcfg {
+ /* Address Incrementing */
+ unsigned dst_inc:1;
+ unsigned src_inc:1;
+
+ /*
+ * For now, the SRC & DST protection levels
+ * and burst size/length are assumed same.
+ */
+ bool nonsecure;
+ bool privileged;
+ bool insnaccess;
+ unsigned brst_len:5;
+ unsigned brst_size:3; /* in power of 2 */
+
+ enum pl330_dstcachectrl dcctl;
+ enum pl330_srccachectrl scctl;
+ enum pl330_byteswap swap;
+ struct pl330_config *pcfg;
+};
+
+/*
+ * One cycle of DMAC operation.
+ * There may be more than one xfer in a request.
+ */
+struct pl330_xfer {
+ u32 src_addr;
+ u32 dst_addr;
+ /* Size to xfer */
+ u32 bytes;
+ /*
+ * Pointer to next xfer in the list.
+ * The last xfer in the req must point to NULL.
+ */
+ struct pl330_xfer *next;
+};
+
+/* The xfer callbacks are made with one of these arguments. */
+enum pl330_op_err {
+ /* All the xfers in the request were successful. */
+ PL330_ERR_NONE,
+ /* If req aborted due to global error. */
+ PL330_ERR_ABORT,
+ /* If req failed due to problem with Channel. */
+ PL330_ERR_FAIL,
+};
+
+/* A request defining Scatter-Gather List ending with NULL xfer. */
+struct pl330_req {
+ enum pl330_reqtype rqtype;
+ /* Index of peripheral for the xfer. */
+ unsigned peri:5;
+ /* Unique token for this xfer, set by the client. */
+ void *token;
+ /* Callback to be called after xfer. */
+ void (*xfer_cb)(void *token, enum pl330_op_err err);
+ /* If NULL, the req uses the last set parameters. */
+ struct pl330_reqcfg *cfg;
+ /* Pointer to first xfer in the request. */
+ struct pl330_xfer *x;
+};
+
+/*
+ * To know the status of the channel and DMAC, the client
+ * provides a pointer to this structure. The PL330 core
+ * fills it with current information.
+ */
+struct pl330_chanstatus {
+ /*
+ * If the DMAC engine halted due to some error,
+ * the client should remove and re-add the DMAC.
+ */
+ bool dmac_halted;
+ /*
+ * If channel is halted due to some error,
+ * the client should ABORT/FLUSH and START the channel.
+ */
+ bool faulting;
+ /* Location of last load */
+ u32 src_addr;
+ /* Location of last store */
+ u32 dst_addr;
+ /*
+ * Pointer to the currently active req, NULL if channel is
+ * inactive, even though the requests may be present.
+ */
+ struct pl330_req *top_req;
+ /* Pointer to req waiting second in the queue if any. */
+ struct pl330_req *wait_req;
+};
+
+enum pl330_chan_op {
+ /* Start the channel */
+ PL330_OP_START,
+ /* Abort the active xfer */
+ PL330_OP_ABORT,
+ /* Stop xfer and flush queue */
+ PL330_OP_FLUSH,
+};
+
+struct _xfer_spec {
+ u32 ccr;
+ struct pl330_req *r;
+ struct pl330_xfer *x;
+};
+
+enum dmamov_dst {
+ SAR = 0,
+ CCR,
+ DAR,
+};
+
+enum pl330_dst {
+ SRC = 0,
+ DST,
+};
+
+enum pl330_cond {
+ SINGLE,
+ BURST,
+ ALWAYS,
+};
+
+struct _pl330_req {
+ u32 mc_bus;
+ void *mc_cpu;
+ /* Number of bytes taken to setup MC for the req */
+ u32 mc_len;
+ struct pl330_req *r;
+ /* Hook to attach to DMAC's list of reqs with due callback */
+ struct list_head rqd;
+};
+
+/* ToBeDone for tasklet */
+struct _pl330_tbd {
+ bool reset_dmac;
+ bool reset_mngr;
+ u8 reset_chan;
+};
+
+/* A DMAC Thread */
+struct pl330_thread {
+ u8 id;
+ int ev;
+ /* If the channel is not yet acquired by any client */
+ bool free;
+ /* Parent DMAC */
+ struct pl330_dmac *dmac;
+ /* Only two at a time */
+ struct _pl330_req req[2];
+ /* Index of the last enqueued request */
+ unsigned lstenq;
+ /* Index of the last submitted request or -1 if the DMA is stopped */
+ int req_running;
+};
+
+enum pl330_dmac_state {
+ UNINIT,
+ INIT,
+ DYING,
+};
+
+/* A DMAC */
+struct pl330_dmac {
+ spinlock_t lock;
+ /* Holds list of reqs with due callbacks */
+ struct list_head req_done;
+ /* Pointer to platform specific stuff */
+ struct pl330_info *pinfo;
+ /* Maximum possible events/irqs */
+ int events[32];
+ /* BUS address of MicroCode buffer */
+ u32 mcode_bus;
+ /* CPU address of MicroCode buffer */
+ void *mcode_cpu;
+ /* List of all Channel threads */
+ struct pl330_thread *channels;
+ /* Pointer to the MANAGER thread */
+ struct pl330_thread *manager;
+ /* To handle bad news in interrupt */
+ struct tasklet_struct tasks;
+ struct _pl330_tbd dmac_tbd;
+ /* State of DMAC operation */
+ enum pl330_dmac_state state;
+};
+
enum desc_status {
/* In the DMAC pool */
FREE,
@@ -51,9 +547,6 @@ struct dma_pl330_chan {
/* DMA-Engine Channel */
struct dma_chan chan;
- /* Last completed cookie */
- dma_cookie_t completed;
-
/* List of to be xfered descriptors */
struct list_head work_list;
@@ -117,6 +610,1599 @@ struct dma_pl330_desc {
struct dma_pl330_chan *pchan;
};
+static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
+{
+ if (r && r->xfer_cb)
+ r->xfer_cb(r->token, err);
+}
+
+static inline bool _queue_empty(struct pl330_thread *thrd)
+{
+ return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
+ ? true : false;
+}
+
+static inline bool _queue_full(struct pl330_thread *thrd)
+{
+ return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
+ ? false : true;
+}
+
+static inline bool is_manager(struct pl330_thread *thrd)
+{
+ struct pl330_dmac *pl330 = thrd->dmac;
+
+ /* MANAGER is indexed at the end */
+ if (thrd->id == pl330->pinfo->pcfg.num_chan)
+ return true;
+ else
+ return false;
+}
+
+/* If manager of the thread is in Non-Secure mode */
+static inline bool _manager_ns(struct pl330_thread *thrd)
+{
+ struct pl330_dmac *pl330 = thrd->dmac;
+
+ return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
+}
+
+static inline u32 get_id(struct pl330_info *pi, u32 off)
+{
+ void __iomem *regs = pi->base;
+ u32 id = 0;
+
+ id |= (readb(regs + off + 0x0) << 0);
+ id |= (readb(regs + off + 0x4) << 8);
+ id |= (readb(regs + off + 0x8) << 16);
+ id |= (readb(regs + off + 0xc) << 24);
+
+ return id;
+}
+
+static inline u32 get_revision(u32 periph_id)
+{
+ return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
+}
+
+static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
+ enum pl330_dst da, u16 val)
+{
+ if (dry_run)
+ return SZ_DMAADDH;
+
+ buf[0] = CMD_DMAADDH;
+ buf[0] |= (da << 1);
+ *((u16 *)&buf[1]) = val;
+
+ PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
+ da == 1 ? "DA" : "SA", val);
+
+ return SZ_DMAADDH;
+}
+
+static inline u32 _emit_END(unsigned dry_run, u8 buf[])
+{
+ if (dry_run)
+ return SZ_DMAEND;
+
+ buf[0] = CMD_DMAEND;
+
+ PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");
+
+ return SZ_DMAEND;
+}
+
+static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
+{
+ if (dry_run)
+ return SZ_DMAFLUSHP;
+
+ buf[0] = CMD_DMAFLUSHP;
+
+ peri &= 0x1f;
+ peri <<= 3;
+ buf[1] = peri;
+
+ PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);
+
+ return SZ_DMAFLUSHP;
+}
+
+static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
+{
+ if (dry_run)
+ return SZ_DMALD;
+
+ buf[0] = CMD_DMALD;
+
+ if (cond == SINGLE)
+ buf[0] |= (0 << 1) | (1 << 0);
+ else if (cond == BURST)
+ buf[0] |= (1 << 1) | (1 << 0);
+
+ PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
+ cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
+
+ return SZ_DMALD;
+}
+
+static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
+ enum pl330_cond cond, u8 peri)
+{
+ if (dry_run)
+ return SZ_DMALDP;
+
+ buf[0] = CMD_DMALDP;
+
+ if (cond == BURST)
+ buf[0] |= (1 << 1);
+
+ peri &= 0x1f;
+ peri <<= 3;
+ buf[1] = peri;
+
+ PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
+ cond == SINGLE ? 'S' : 'B', peri >> 3);
+
+ return SZ_DMALDP;
+}
+
+static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
+ unsigned loop, u8 cnt)
+{
+ if (dry_run)
+ return SZ_DMALP;
+
+ buf[0] = CMD_DMALP;
+
+ if (loop)
+ buf[0] |= (1 << 1);
+
+ cnt--; /* DMAC increments by 1 internally */
+ buf[1] = cnt;
+
+ PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);
+
+ return SZ_DMALP;
+}
+
+struct _arg_LPEND {
+ enum pl330_cond cond;
+ bool forever;
+ unsigned loop;
+ u8 bjump;
+};
+
+static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
+ const struct _arg_LPEND *arg)
+{
+ enum pl330_cond cond = arg->cond;
+ bool forever = arg->forever;
+ unsigned loop = arg->loop;
+ u8 bjump = arg->bjump;
+
+ if (dry_run)
+ return SZ_DMALPEND;
+
+ buf[0] = CMD_DMALPEND;
+
+ if (loop)
+ buf[0] |= (1 << 2);
+
+ if (!forever)
+ buf[0] |= (1 << 4);
+
+ if (cond == SINGLE)
+ buf[0] |= (0 << 1) | (1 << 0);
+ else if (cond == BURST)
+ buf[0] |= (1 << 1) | (1 << 0);
+
+ buf[1] = bjump;
+
+ PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
+ forever ? "FE" : "END",
+ cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
+ loop ? '1' : '0',
+ bjump);
+
+ return SZ_DMALPEND;
+}
+
+static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
+{
+ if (dry_run)
+ return SZ_DMAKILL;
+
+ buf[0] = CMD_DMAKILL;
+
+ return SZ_DMAKILL;
+}
+
+static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
+ enum dmamov_dst dst, u32 val)
+{
+ if (dry_run)
+ return SZ_DMAMOV;
+
+ buf[0] = CMD_DMAMOV;
+ buf[1] = dst;
+ *((u32 *)&buf[2]) = val;
+
+ PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
+ dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
+
+ return SZ_DMAMOV;
+}
+
+static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
+{
+ if (dry_run)
+ return SZ_DMANOP;
+
+ buf[0] = CMD_DMANOP;
+
+ PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");
+
+ return SZ_DMANOP;
+}
+
+static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
+{
+ if (dry_run)
+ return SZ_DMARMB;
+
+ buf[0] = CMD_DMARMB;
+
+ PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");
+
+ return SZ_DMARMB;
+}
+
+static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
+{
+ if (dry_run)
+ return SZ_DMASEV;
+
+ buf[0] = CMD_DMASEV;
+
+ ev &= 0x1f;
+ ev <<= 3;
+ buf[1] = ev;
+
+ PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);
+
+ return SZ_DMASEV;
+}
+
+static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
+{
+ if (dry_run)
+ return SZ_DMAST;
+
+ buf[0] = CMD_DMAST;
+
+ if (cond == SINGLE)
+ buf[0] |= (0 << 1) | (1 << 0);
+ else if (cond == BURST)
+ buf[0] |= (1 << 1) | (1 << 0);
+
+ PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
+ cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
+
+ return SZ_DMAST;
+}
+
+static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
+ enum pl330_cond cond, u8 peri)
+{
+ if (dry_run)
+ return SZ_DMASTP;
+
+ buf[0] = CMD_DMASTP;
+
+ if (cond == BURST)
+ buf[0] |= (1 << 1);
+
+ peri &= 0x1f;
+ peri <<= 3;
+ buf[1] = peri;
+
+ PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
+ cond == SINGLE ? 'S' : 'B', peri >> 3);
+
+ return SZ_DMASTP;
+}
+
+static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
+{
+ if (dry_run)
+ return SZ_DMASTZ;
+
+ buf[0] = CMD_DMASTZ;
+
+ PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");
+
+ return SZ_DMASTZ;
+}
+
+static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
+ unsigned invalidate)
+{
+ if (dry_run)
+ return SZ_DMAWFE;
+
+ buf[0] = CMD_DMAWFE;
+
+ ev &= 0x1f;
+ ev <<= 3;
+ buf[1] = ev;
+
+ if (invalidate)
+ buf[1] |= (1 << 1);
+
+ PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
+ ev >> 3, invalidate ? ", I" : "");
+
+ return SZ_DMAWFE;
+}
+
+static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
+ enum pl330_cond cond, u8 peri)
+{
+ if (dry_run)
+ return SZ_DMAWFP;
+
+ buf[0] = CMD_DMAWFP;
+
+ if (cond == SINGLE)
+ buf[0] |= (0 << 1) | (0 << 0);
+ else if (cond == BURST)
+ buf[0] |= (1 << 1) | (0 << 0);
+ else
+ buf[0] |= (0 << 1) | (1 << 0);
+
+ peri &= 0x1f;
+ peri <<= 3;
+ buf[1] = peri;
+
+ PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
+ cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
+
+ return SZ_DMAWFP;
+}
+
+static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
+{
+ if (dry_run)
+ return SZ_DMAWMB;
+
+ buf[0] = CMD_DMAWMB;
+
+ PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
+
+ return SZ_DMAWMB;
+}
+
+struct _arg_GO {
+ u8 chan;
+ u32 addr;
+ unsigned ns;
+};
+
+static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
+ const struct _arg_GO *arg)
+{
+ u8 chan = arg->chan;
+ u32 addr = arg->addr;
+ unsigned ns = arg->ns;
+
+ if (dry_run)
+ return SZ_DMAGO;
+
+ buf[0] = CMD_DMAGO;
+ buf[0] |= (ns << 1);
+
+ buf[1] = chan & 0x7;
+
+ *((u32 *)&buf[2]) = addr;
+
+ return SZ_DMAGO;
+}
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
+/* Returns true on Time-Out */
+static bool _until_dmac_idle(struct pl330_thread *thrd)
+{
+ void __iomem *regs = thrd->dmac->pinfo->base;
+ unsigned long loops = msecs_to_loops(5);
+
+ do {
+ /* Until Manager is Idle */
+ if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
+ break;
+
+ cpu_relax();
+ } while (--loops);
+
+ if (!loops)
+ return true;
+
+ return false;
+}
+
+static inline void _execute_DBGINSN(struct pl330_thread *thrd,
+ u8 insn[], bool as_manager)
+{
+ void __iomem *regs = thrd->dmac->pinfo->base;
+ u32 val;
+
+ val = (insn[0] << 16) | (insn[1] << 24);
+ if (!as_manager) {
+ val |= (1 << 0);
+ val |= (thrd->id << 8); /* Channel Number */
+ }
+ writel(val, regs + DBGINST0);
+
+ val = *((u32 *)&insn[2]);
+ writel(val, regs + DBGINST1);
+
+ /* If timed out due to halted state-machine */
+ if (_until_dmac_idle(thrd)) {
+ dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
+ return;
+ }
+
+ /* Get going */
+ writel(0, regs + DBGCMD);
+}
+
+/*
+ * Mark a _pl330_req as free.
+ * We do it by writing DMAEND as the first instruction
+ * because no valid request is going to have DMAEND as
+ * its first instruction to execute.
+ */
+static void mark_free(struct pl330_thread *thrd, int idx)
+{
+ struct _pl330_req *req = &thrd->req[idx];
+
+ _emit_END(0, req->mc_cpu);
+ req->mc_len = 0;
+
+ thrd->req_running = -1;
+}
+
+static inline u32 _state(struct pl330_thread *thrd)
+{
+ void __iomem *regs = thrd->dmac->pinfo->base;
+ u32 val;
+
+ if (is_manager(thrd))
+ val = readl(regs + DS) & 0xf;
+ else
+ val = readl(regs + CS(thrd->id)) & 0xf;
+
+ switch (val) {
+ case DS_ST_STOP:
+ return PL330_STATE_STOPPED;
+ case DS_ST_EXEC:
+ return PL330_STATE_EXECUTING;
+ case DS_ST_CMISS:
+ return PL330_STATE_CACHEMISS;
+ case DS_ST_UPDTPC:
+ return PL330_STATE_UPDTPC;
+ case DS_ST_WFE:
+ return PL330_STATE_WFE;
+ case DS_ST_FAULT:
+ return PL330_STATE_FAULTING;
+ case DS_ST_ATBRR:
+ if (is_manager(thrd))
+ return PL330_STATE_INVALID;
+ else
+ return PL330_STATE_ATBARRIER;
+ case DS_ST_QBUSY:
+ if (is_manager(thrd))
+ return PL330_STATE_INVALID;
+ else
+ return PL330_STATE_QUEUEBUSY;
+ case DS_ST_WFP:
+ if (is_manager(thrd))
+ return PL330_STATE_INVALID;
+ else
+ return PL330_STATE_WFP;
+ case DS_ST_KILL:
+ if (is_manager(thrd))
+ return PL330_STATE_INVALID;
+ else
+ return PL330_STATE_KILLING;
+ case DS_ST_CMPLT:
+ if (is_manager(thrd))
+ return PL330_STATE_INVALID;
+ else
+ return PL330_STATE_COMPLETING;
+ case DS_ST_FLTCMP:
+ if (is_manager(thrd))
+ return PL330_STATE_INVALID;
+ else
+ return PL330_STATE_FAULT_COMPLETING;
+ default:
+ return PL330_STATE_INVALID;
+ }
+}
+
+static void _stop(struct pl330_thread *thrd)
+{
+ void __iomem *regs = thrd->dmac->pinfo->base;
+ u8 insn[6] = {0, 0, 0, 0, 0, 0};
+
+ if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
+ UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
+
+ /* Return if nothing needs to be done */
+ if (_state(thrd) == PL330_STATE_COMPLETING
+ || _state(thrd) == PL330_STATE_KILLING
+ || _state(thrd) == PL330_STATE_STOPPED)
+ return;
+
+ _emit_KILL(0, insn);
+
+ /* Stop generating interrupts for SEV */
+ writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
+
+ _execute_DBGINSN(thrd, insn, is_manager(thrd));
+}
+
+/* Start the next pending req, if any, of thread 'thrd' */
+static bool _trigger(struct pl330_thread *thrd)
+{
+ void __iomem *regs = thrd->dmac->pinfo->base;
+ struct _pl330_req *req;
+ struct pl330_req *r;
+ struct _arg_GO go;
+ unsigned ns;
+ u8 insn[6] = {0, 0, 0, 0, 0, 0};
+ int idx;
+
+ /* Return if already ACTIVE */
+ if (_state(thrd) != PL330_STATE_STOPPED)
+ return true;
+
+ idx = 1 - thrd->lstenq;
+ if (!IS_FREE(&thrd->req[idx]))
+ req = &thrd->req[idx];
+ else {
+ idx = thrd->lstenq;
+ if (!IS_FREE(&thrd->req[idx]))
+ req = &thrd->req[idx];
+ else
+ req = NULL;
+ }
+
+ /* Return if no request */
+ if (!req || !req->r)
+ return true;
+
+ r = req->r;
+
+ if (r->cfg)
+ ns = r->cfg->nonsecure ? 1 : 0;
+ else if (readl(regs + CS(thrd->id)) & CS_CNS)
+ ns = 1;
+ else
+ ns = 0;
+
+ /* See 'Abort Sources' point-4 at Page 2-25 */
+ if (_manager_ns(thrd) && !ns)
+ dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
+ __func__, __LINE__);
+
+ go.chan = thrd->id;
+ go.addr = req->mc_bus;
+ go.ns = ns;
+ _emit_GO(0, insn, &go);
+
+ /* Set to generate interrupts for SEV */
+ writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);
+
+ /* Only manager can execute GO */
+ _execute_DBGINSN(thrd, insn, true);
+
+ thrd->req_running = idx;
+
+ return true;
+}
+
+static bool _start(struct pl330_thread *thrd)
+{
+ switch (_state(thrd)) {
+ case PL330_STATE_FAULT_COMPLETING:
+ UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
+
+ if (_state(thrd) == PL330_STATE_KILLING)
+ UNTIL(thrd, PL330_STATE_STOPPED)
+
+ case PL330_STATE_FAULTING:
+ _stop(thrd);
+
+ case PL330_STATE_KILLING:
+ case PL330_STATE_COMPLETING:
+ UNTIL(thrd, PL330_STATE_STOPPED)
+
+ case PL330_STATE_STOPPED:
+ return _trigger(thrd);
+
+ case PL330_STATE_WFP:
+ case PL330_STATE_QUEUEBUSY:
+ case PL330_STATE_ATBARRIER:
+ case PL330_STATE_UPDTPC:
+ case PL330_STATE_CACHEMISS:
+ case PL330_STATE_EXECUTING:
+ return true;
+
+ case PL330_STATE_WFE: /* For RESUME, nothing yet */
+ default:
+ return false;
+ }
+}
+
+static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int cyc)
+{
+ int off = 0;
+ struct pl330_config *pcfg = pxs->r->cfg->pcfg;
+
+ /* check lock-up free version */
+ if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
+ while (cyc--) {
+ off += _emit_LD(dry_run, &buf[off], ALWAYS);
+ off += _emit_ST(dry_run, &buf[off], ALWAYS);
+ }
+ } else {
+ while (cyc--) {
+ off += _emit_LD(dry_run, &buf[off], ALWAYS);
+ off += _emit_RMB(dry_run, &buf[off]);
+ off += _emit_ST(dry_run, &buf[off], ALWAYS);
+ off += _emit_WMB(dry_run, &buf[off]);
+ }
+ }
+
+ return off;
+}
+
+static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int cyc)
+{
+ int off = 0;
+
+ while (cyc--) {
+ off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
+ off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
+ off += _emit_ST(dry_run, &buf[off], ALWAYS);
+ off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
+ }
+
+ return off;
+}
+
+static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int cyc)
+{
+ int off = 0;
+
+ while (cyc--) {
+ off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
+ off += _emit_LD(dry_run, &buf[off], ALWAYS);
+ off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
+ off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
+ }
+
+ return off;
+}
+
+static int _bursts(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int cyc)
+{
+ int off = 0;
+
+ switch (pxs->r->rqtype) {
+ case MEMTODEV:
+ off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
+ break;
+ case DEVTOMEM:
+ off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
+ break;
+ case MEMTOMEM:
+ off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
+ break;
+ default:
+ off += 0x40000000; /* Scare off the Client */
+ break;
+ }
+
+ return off;
+}
+
+/* Returns bytes consumed and updates bursts */
+static inline int _loop(unsigned dry_run, u8 buf[],
+ unsigned long *bursts, const struct _xfer_spec *pxs)
+{
+ int cyc, cycmax, szlp, szlpend, szbrst, off;
+ unsigned lcnt0, lcnt1, ljmp0, ljmp1;
+ struct _arg_LPEND lpend;
+
+ /* Max iterations possible in DMALP is 256 */
+ if (*bursts >= 256*256) {
+ lcnt1 = 256;
+ lcnt0 = 256;
+ cyc = *bursts / lcnt1 / lcnt0;
+ } else if (*bursts > 256) {
+ lcnt1 = 256;
+ lcnt0 = *bursts / lcnt1;
+ cyc = 1;
+ } else {
+ lcnt1 = *bursts;
+ lcnt0 = 0;
+ cyc = 1;
+ }
+
+ szlp = _emit_LP(1, buf, 0, 0);
+ szbrst = _bursts(1, buf, pxs, 1);
+
+ lpend.cond = ALWAYS;
+ lpend.forever = false;
+ lpend.loop = 0;
+ lpend.bjump = 0;
+ szlpend = _emit_LPEND(1, buf, &lpend);
+
+ if (lcnt0) {
+ szlp *= 2;
+ szlpend *= 2;
+ }
+
+ /*
+ * Max bursts that we can unroll due to limit on the
+ * size of backward jump that can be encoded in DMALPEND
+ * which is 8-bits and hence 255
+ */
+ cycmax = (255 - (szlp + szlpend)) / szbrst;
+
+ cyc = (cycmax < cyc) ? cycmax : cyc;
+
+ off = 0;
+
+ if (lcnt0) {
+ off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
+ ljmp0 = off;
+ }
+
+ off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
+ ljmp1 = off;
+
+ off += _bursts(dry_run, &buf[off], pxs, cyc);
+
+ lpend.cond = ALWAYS;
+ lpend.forever = false;
+ lpend.loop = 1;
+ lpend.bjump = off - ljmp1;
+ off += _emit_LPEND(dry_run, &buf[off], &lpend);
+
+ if (lcnt0) {
+ lpend.cond = ALWAYS;
+ lpend.forever = false;
+ lpend.loop = 0;
+ lpend.bjump = off - ljmp0;
+ off += _emit_LPEND(dry_run, &buf[off], &lpend);
+ }
+
+ *bursts = lcnt1 * cyc;
+ if (lcnt0)
+ *bursts *= lcnt0;
+
+ return off;
+}
+
+static inline int _setup_loops(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs)
+{
+ struct pl330_xfer *x = pxs->x;
+ u32 ccr = pxs->ccr;
+ unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
+ int off = 0;
+
+ while (bursts) {
+ c = bursts;
+ off += _loop(dry_run, &buf[off], &c, pxs);
+ bursts -= c;
+ }
+
+ return off;
+}
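+
+/*
+ * For illustration: since DMALP can count at most 256 iterations per
+ * loop level, a request of 70000 bursts is covered in three _loop()
+ * passes: 256 * 256 = 65536 bursts, then 17 * 256 = 4352, and a final
+ * single-level loop of 112.
+ */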
+
+static inline int _setup_xfer(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs)
+{
+ struct pl330_xfer *x = pxs->x;
+ int off = 0;
+
+ /* DMAMOV SAR, x->src_addr */
+ off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
+ /* DMAMOV DAR, x->dst_addr */
+ off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
+
+ /* Setup Loop(s) */
+ off += _setup_loops(dry_run, &buf[off], pxs);
+
+ return off;
+}
+
+/*
+ * A req is a sequence of one or more xfer units.
+ * Returns the number of bytes taken to setup the MC for the req.
+ */
+static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
+ unsigned index, struct _xfer_spec *pxs)
+{
+ struct _pl330_req *req = &thrd->req[index];
+ struct pl330_xfer *x;
+ u8 *buf = req->mc_cpu;
+ int off = 0;
+
+ PL330_DBGMC_START(req->mc_bus);
+
+ /* DMAMOV CCR, ccr */
+ off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
+
+ x = pxs->r->x;
+ do {
+ /* Error if xfer length is not aligned at burst size */
+ if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
+ return -EINVAL;
+
+ pxs->x = x;
+ off += _setup_xfer(dry_run, &buf[off], pxs);
+
+ x = x->next;
+ } while (x);
+
+ /* DMASEV peripheral/event */
+ off += _emit_SEV(dry_run, &buf[off], thrd->ev);
+ /* DMAEND */
+ off += _emit_END(dry_run, &buf[off]);
+
+ return off;
+}
+
+static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
+{
+ u32 ccr = 0;
+
+ if (rqc->src_inc)
+ ccr |= CC_SRCINC;
+
+ if (rqc->dst_inc)
+ ccr |= CC_DSTINC;
+
+ /* We set same protection levels for Src and DST for now */
+ if (rqc->privileged)
+ ccr |= CC_SRCPRI | CC_DSTPRI;
+ if (rqc->nonsecure)
+ ccr |= CC_SRCNS | CC_DSTNS;
+ if (rqc->insnaccess)
+ ccr |= CC_SRCIA | CC_DSTIA;
+
+ ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
+ ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);
+
+ ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
+ ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
+
+ ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
+ ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
+
+ ccr |= (rqc->swap << CC_SWAP_SHFT);
+
+ return ccr;
+}
+
+static inline bool _is_valid(u32 ccr)
+{
+ enum pl330_dstcachectrl dcctl;
+ enum pl330_srccachectrl scctl;
+
+ dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
+ scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;
+
+ if (dcctl == DINVALID1 || dcctl == DINVALID2
+ || scctl == SINVALID1 || scctl == SINVALID2)
+ return false;
+ else
+ return true;
+}
+
+/*
+ * Submit a list of xfers after which the client wants notification.
+ * Client is not notified after each xfer unit, just once after all
+ * xfer units are done or some error occurs.
+ */
+static int pl330_submit_req(void *ch_id, struct pl330_req *r)
+{
+ struct pl330_thread *thrd = ch_id;
+ struct pl330_dmac *pl330;
+ struct pl330_info *pi;
+ struct _xfer_spec xs;
+ unsigned long flags;
+ void __iomem *regs;
+ unsigned idx;
+ u32 ccr;
+ int ret = 0;
+
+ /* No Req or Unacquired Channel or DMAC */
+ if (!r || !thrd || thrd->free)
+ return -EINVAL;
+
+ pl330 = thrd->dmac;
+ pi = pl330->pinfo;
+ regs = pi->base;
+
+ if (pl330->state == DYING
+ || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
+ dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
+ __func__, __LINE__);
+ return -EAGAIN;
+ }
+
+ /* If request for non-existing peripheral */
+ if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
+ dev_info(thrd->dmac->pinfo->dev,
+ "%s:%d Invalid peripheral(%u)!\n",
+ __func__, __LINE__, r->peri);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+ if (_queue_full(thrd)) {
+ ret = -EAGAIN;
+ goto xfer_exit;
+ }
+
+ /* Use last settings, if not provided */
+ if (r->cfg) {
+ /* Prefer Secure Channel */
+ if (!_manager_ns(thrd))
+ r->cfg->nonsecure = 0;
+ else
+ r->cfg->nonsecure = 1;
+
+ ccr = _prepare_ccr(r->cfg);
+ } else {
+ ccr = readl(regs + CC(thrd->id));
+ }
+
+ /* If this req doesn't have valid xfer settings */
+ if (!_is_valid(ccr)) {
+ ret = -EINVAL;
+ dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
+ __func__, __LINE__, ccr);
+ goto xfer_exit;
+ }
+
+ idx = IS_FREE(&thrd->req[0]) ? 0 : 1;
+
+ xs.ccr = ccr;
+ xs.r = r;
+
+ /* First dry run to check if req is acceptable */
+ ret = _setup_req(1, thrd, idx, &xs);
+ if (ret < 0)
+ goto xfer_exit;
+
+ if (ret > pi->mcbufsz / 2) {
+ dev_info(thrd->dmac->pinfo->dev,
+ "%s:%d Trying increasing mcbufsz\n",
+ __func__, __LINE__);
+ ret = -ENOMEM;
+ goto xfer_exit;
+ }
+
+ /* Hook the request */
+ thrd->lstenq = idx;
+ thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
+ thrd->req[idx].r = r;
+
+ ret = 0;
+
+xfer_exit:
+ spin_unlock_irqrestore(&pl330->lock, flags);
+
+ return ret;
+}
+
+static void pl330_dotask(unsigned long data)
+{
+ struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
+ struct pl330_info *pi = pl330->pinfo;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+ /* The DMAC itself has gone nuts */
+ if (pl330->dmac_tbd.reset_dmac) {
+ pl330->state = DYING;
+ /* Reset the manager too */
+ pl330->dmac_tbd.reset_mngr = true;
+ /* Clear the reset flag */
+ pl330->dmac_tbd.reset_dmac = false;
+ }
+
+ if (pl330->dmac_tbd.reset_mngr) {
+ _stop(pl330->manager);
+ /* Reset all channels */
+ pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
+ /* Clear the reset flag */
+ pl330->dmac_tbd.reset_mngr = false;
+ }
+
+ for (i = 0; i < pi->pcfg.num_chan; i++) {
+
+ if (pl330->dmac_tbd.reset_chan & (1 << i)) {
+ struct pl330_thread *thrd = &pl330->channels[i];
+ void __iomem *regs = pi->base;
+ enum pl330_op_err err;
+
+ _stop(thrd);
+
+ if (readl(regs + FSC) & (1 << thrd->id))
+ err = PL330_ERR_FAIL;
+ else
+ err = PL330_ERR_ABORT;
+
+ spin_unlock_irqrestore(&pl330->lock, flags);
+
+ _callback(thrd->req[1 - thrd->lstenq].r, err);
+ _callback(thrd->req[thrd->lstenq].r, err);
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+ thrd->req[0].r = NULL;
+ thrd->req[1].r = NULL;
+ mark_free(thrd, 0);
+ mark_free(thrd, 1);
+
+ /* Clear the reset flag */
+ pl330->dmac_tbd.reset_chan &= ~(1 << i);
+ }
+ }
+
+ spin_unlock_irqrestore(&pl330->lock, flags);
+
+ return;
+}
+
+/* Returns 1 if state was updated, 0 otherwise */
+static int pl330_update(const struct pl330_info *pi)
+{
+ struct _pl330_req *rqdone;
+ struct pl330_dmac *pl330;
+ unsigned long flags;
+ void __iomem *regs;
+ u32 val;
+ int id, ev, ret = 0;
+
+ if (!pi || !pi->pl330_data)
+ return 0;
+
+ regs = pi->base;
+ pl330 = pi->pl330_data;
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+ val = readl(regs + FSM) & 0x1;
+ if (val)
+ pl330->dmac_tbd.reset_mngr = true;
+ else
+ pl330->dmac_tbd.reset_mngr = false;
+
+ val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
+ pl330->dmac_tbd.reset_chan |= val;
+ if (val) {
+ int i = 0;
+ while (i < pi->pcfg.num_chan) {
+ if (val & (1 << i)) {
+ dev_info(pi->dev,
+ "Reset Channel-%d\t CS-%x FTC-%x\n",
+ i, readl(regs + CS(i)),
+ readl(regs + FTC(i)));
+ _stop(&pl330->channels[i]);
+ }
+ i++;
+ }
+ }
+
+ /* Check which event happened, i.e. which thread notified */
+ val = readl(regs + ES);
+ if (pi->pcfg.num_events < 32
+ && val & ~((1 << pi->pcfg.num_events) - 1)) {
+ pl330->dmac_tbd.reset_dmac = true;
+ dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
+ ret = 1;
+ goto updt_exit;
+ }
+
+ for (ev = 0; ev < pi->pcfg.num_events; ev++) {
+ if (val & (1 << ev)) { /* Event occurred */
+ struct pl330_thread *thrd;
+ u32 inten = readl(regs + INTEN);
+ int active;
+
+ /* Clear the event */
+ if (inten & (1 << ev))
+ writel(1 << ev, regs + INTCLR);
+
+ ret = 1;
+
+ id = pl330->events[ev];
+
+ thrd = &pl330->channels[id];
+
+ active = thrd->req_running;
+ if (active == -1) /* Aborted */
+ continue;
+
+ rqdone = &thrd->req[active];
+ mark_free(thrd, active);
+
+ /* Get going again ASAP */
+ _start(thrd);
+
+ /* For now, just make a list of callbacks to be done */
+ list_add_tail(&rqdone->rqd, &pl330->req_done);
+ }
+ }
+
+ /* Now that we are in no hurry, do the callbacks */
+ while (!list_empty(&pl330->req_done)) {
+ struct pl330_req *r;
+
+ rqdone = container_of(pl330->req_done.next,
+ struct _pl330_req, rqd);
+
+ list_del_init(&rqdone->rqd);
+
+ /* Detach the req */
+ r = rqdone->r;
+ rqdone->r = NULL;
+
+ spin_unlock_irqrestore(&pl330->lock, flags);
+ _callback(r, PL330_ERR_NONE);
+ spin_lock_irqsave(&pl330->lock, flags);
+ }
+
+updt_exit:
+ spin_unlock_irqrestore(&pl330->lock, flags);
+
+ if (pl330->dmac_tbd.reset_dmac
+ || pl330->dmac_tbd.reset_mngr
+ || pl330->dmac_tbd.reset_chan) {
+ ret = 1;
+ tasklet_schedule(&pl330->tasks);
+ }
+
+ return ret;
+}
+
+static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
+{
+ struct pl330_thread *thrd = ch_id;
+ struct pl330_dmac *pl330;
+ unsigned long flags;
+ int ret = 0, active;
+
+ if (!thrd || thrd->free || thrd->dmac->state == DYING)
+ return -EINVAL;
+
+ pl330 = thrd->dmac;
+ active = thrd->req_running;
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+ switch (op) {
+ case PL330_OP_FLUSH:
+ /* Make sure the channel is stopped */
+ _stop(thrd);
+
+ thrd->req[0].r = NULL;
+ thrd->req[1].r = NULL;
+ mark_free(thrd, 0);
+ mark_free(thrd, 1);
+ break;
+
+ case PL330_OP_ABORT:
+ /* Make sure the channel is stopped */
+ _stop(thrd);
+
+ /* ABORT is only for the active req */
+ if (active == -1)
+ break;
+
+ thrd->req[active].r = NULL;
+ mark_free(thrd, active);
+
+ /* Start the next */
+ case PL330_OP_START:
+ if ((active == -1) && !_start(thrd))
+ ret = -EIO;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&pl330->lock, flags);
+ return ret;
+}
+
+/* Reserve an event */
+static inline int _alloc_event(struct pl330_thread *thrd)
+{
+ struct pl330_dmac *pl330 = thrd->dmac;
+ struct pl330_info *pi = pl330->pinfo;
+ int ev;
+
+ for (ev = 0; ev < pi->pcfg.num_events; ev++)
+ if (pl330->events[ev] == -1) {
+ pl330->events[ev] = thrd->id;
+ return ev;
+ }
+
+ return -1;
+}
+
+static bool _chan_ns(const struct pl330_info *pi, int i)
+{
+ return pi->pcfg.irq_ns & (1 << i);
+}
+
+/* Upon success, returns IdentityToken for the
+ * allocated channel, NULL otherwise.
+ */
+static void *pl330_request_channel(const struct pl330_info *pi)
+{
+ struct pl330_thread *thrd = NULL;
+ struct pl330_dmac *pl330;
+ unsigned long flags;
+ int chans, i;
+
+ if (!pi || !pi->pl330_data)
+ return NULL;
+
+ pl330 = pi->pl330_data;
+
+ if (pl330->state == DYING)
+ return NULL;
+
+ chans = pi->pcfg.num_chan;
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+ for (i = 0; i < chans; i++) {
+ thrd = &pl330->channels[i];
+ if ((thrd->free) && (!_manager_ns(thrd) ||
+ _chan_ns(pi, i))) {
+ thrd->ev = _alloc_event(thrd);
+ if (thrd->ev >= 0) {
+ thrd->free = false;
+ thrd->lstenq = 1;
+ thrd->req[0].r = NULL;
+ mark_free(thrd, 0);
+ thrd->req[1].r = NULL;
+ mark_free(thrd, 1);
+ break;
+ }
+ }
+ thrd = NULL;
+ }
+
+ spin_unlock_irqrestore(&pl330->lock, flags);
+
+ return thrd;
+}
+
+/* Release an event */
+static inline void _free_event(struct pl330_thread *thrd, int ev)
+{
+ struct pl330_dmac *pl330 = thrd->dmac;
+ struct pl330_info *pi = pl330->pinfo;
+
+ /* If the event is valid and was held by the thread */
+ if (ev >= 0 && ev < pi->pcfg.num_events
+ && pl330->events[ev] == thrd->id)
+ pl330->events[ev] = -1;
+}
+
+static void pl330_release_channel(void *ch_id)
+{
+ struct pl330_thread *thrd = ch_id;
+ struct pl330_dmac *pl330;
+ unsigned long flags;
+
+ if (!thrd || thrd->free)
+ return;
+
+ _stop(thrd);
+
+ _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
+ _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);
+
+ pl330 = thrd->dmac;
+
+ spin_lock_irqsave(&pl330->lock, flags);
+ _free_event(thrd, thrd->ev);
+ thrd->free = true;
+ spin_unlock_irqrestore(&pl330->lock, flags);
+}
+
+/* Initialize the structure for PL330 configuration, which can be used
+ * by the client driver to make best use of the DMAC
+ */
+static void read_dmac_config(struct pl330_info *pi)
+{
+ void __iomem *regs = pi->base;
+ u32 val;
+
+ val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
+ val &= CRD_DATA_WIDTH_MASK;
+ pi->pcfg.data_bus_width = 8 * (1 << val);
+
+ val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
+ val &= CRD_DATA_BUFF_MASK;
+ pi->pcfg.data_buf_dep = val + 1;
+
+ val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
+ val &= CR0_NUM_CHANS_MASK;
+ val += 1;
+ pi->pcfg.num_chan = val;
+
+ val = readl(regs + CR0);
+ if (val & CR0_PERIPH_REQ_SET) {
+ val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
+ val += 1;
+ pi->pcfg.num_peri = val;
+ pi->pcfg.peri_ns = readl(regs + CR4);
+ } else {
+ pi->pcfg.num_peri = 0;
+ }
+
+ val = readl(regs + CR0);
+ if (val & CR0_BOOT_MAN_NS)
+ pi->pcfg.mode |= DMAC_MODE_NS;
+ else
+ pi->pcfg.mode &= ~DMAC_MODE_NS;
+
+ val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
+ val &= CR0_NUM_EVENTS_MASK;
+ val += 1;
+ pi->pcfg.num_events = val;
+
+ pi->pcfg.irq_ns = readl(regs + CR3);
+
+ pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
+ pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
+}
+
+static inline void _reset_thread(struct pl330_thread *thrd)
+{
+ struct pl330_dmac *pl330 = thrd->dmac;
+ struct pl330_info *pi = pl330->pinfo;
+
+ thrd->req[0].mc_cpu = pl330->mcode_cpu
+ + (thrd->id * pi->mcbufsz);
+ thrd->req[0].mc_bus = pl330->mcode_bus
+ + (thrd->id * pi->mcbufsz);
+ thrd->req[0].r = NULL;
+ mark_free(thrd, 0);
+
+ thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
+ + pi->mcbufsz / 2;
+ thrd->req[1].mc_bus = thrd->req[0].mc_bus
+ + pi->mcbufsz / 2;
+ thrd->req[1].r = NULL;
+ mark_free(thrd, 1);
+}
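+
+/*
+ * Resulting microcode layout, for illustration: with the default mcbufsz
+ * of 2 * MCODE_BUFF_PER_REQ (512 bytes, see pl330_add), channel N's
+ * req[0] program starts at offset N * 512 into the DMA-coherent buffer
+ * and req[1] at offset N * 512 + 256.
+ */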
+
+static int dmac_alloc_threads(struct pl330_dmac *pl330)
+{
+ struct pl330_info *pi = pl330->pinfo;
+ int chans = pi->pcfg.num_chan;
+ struct pl330_thread *thrd;
+ int i;
+
+ /* Allocate 1 Manager and 'chans' Channel threads */
+ pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
+ GFP_KERNEL);
+ if (!pl330->channels)
+ return -ENOMEM;
+
+ /* Init Channel threads */
+ for (i = 0; i < chans; i++) {
+ thrd = &pl330->channels[i];
+ thrd->id = i;
+ thrd->dmac = pl330;
+ _reset_thread(thrd);
+ thrd->free = true;
+ }
+
+ /* MANAGER is indexed at the end */
+ thrd = &pl330->channels[chans];
+ thrd->id = chans;
+ thrd->dmac = pl330;
+ thrd->free = false;
+ pl330->manager = thrd;
+
+ return 0;
+}
+
+static int dmac_alloc_resources(struct pl330_dmac *pl330)
+{
+ struct pl330_info *pi = pl330->pinfo;
+ int chans = pi->pcfg.num_chan;
+ int ret;
+
+ /*
+ * Alloc MicroCode buffer for 'chans' Channel threads.
+ * A channel's buffer offset is (Channel_Id * mcbufsz)
+ */
+ pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
+ chans * pi->mcbufsz,
+ &pl330->mcode_bus, GFP_KERNEL);
+ if (!pl330->mcode_cpu) {
+ dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ ret = dmac_alloc_threads(pl330);
+ if (ret) {
+ dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n",
+ __func__, __LINE__);
+ dma_free_coherent(pi->dev,
+ chans * pi->mcbufsz,
+ pl330->mcode_cpu, pl330->mcode_bus);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pl330_add(struct pl330_info *pi)
+{
+ struct pl330_dmac *pl330;
+ void __iomem *regs;
+ int i, ret;
+
+ if (!pi || !pi->dev)
+ return -EINVAL;
+
+ /* If already added */
+ if (pi->pl330_data)
+ return -EINVAL;
+
+ /*
+ * If the SoC can perform reset on the DMAC, then do it
+ * before reading its configuration.
+ */
+ if (pi->dmac_reset)
+ pi->dmac_reset(pi);
+
+ regs = pi->base;
+
+ /* Check if we can handle this DMAC */
+ if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
+ || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
+ dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
+ get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
+ return -EINVAL;
+ }
+
+ /* Read the configuration of the DMAC */
+ read_dmac_config(pi);
+
+ if (pi->pcfg.num_events == 0) {
+ dev_err(pi->dev, "%s:%d Can't work without events!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
+ if (!pl330) {
+ dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ /* Assign the info structure and private data */
+ pl330->pinfo = pi;
+ pi->pl330_data = pl330;
+
+ spin_lock_init(&pl330->lock);
+
+ INIT_LIST_HEAD(&pl330->req_done);
+
+ /* Use default MC buffer size if not provided */
+ if (!pi->mcbufsz)
+ pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;
+
+ /* Mark all events as free */
+ for (i = 0; i < pi->pcfg.num_events; i++)
+ pl330->events[i] = -1;
+
+ /* Allocate resources needed by the DMAC */
+ ret = dmac_alloc_resources(pl330);
+ if (ret) {
+ dev_err(pi->dev, "Unable to create channels for DMAC\n");
+ kfree(pl330);
+ return ret;
+ }
+
+ tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
+
+ pl330->state = INIT;
+
+ return 0;
+}
+
+static int dmac_free_threads(struct pl330_dmac *pl330)
+{
+ struct pl330_info *pi = pl330->pinfo;
+ int chans = pi->pcfg.num_chan;
+ struct pl330_thread *thrd;
+ int i;
+
+ /* Release Channel threads */
+ for (i = 0; i < chans; i++) {
+ thrd = &pl330->channels[i];
+ pl330_release_channel((void *)thrd);
+ }
+
+ /* Free memory */
+ kfree(pl330->channels);
+
+ return 0;
+}
+
+static void dmac_free_resources(struct pl330_dmac *pl330)
+{
+ struct pl330_info *pi = pl330->pinfo;
+ int chans = pi->pcfg.num_chan;
+
+ dmac_free_threads(pl330);
+
+ dma_free_coherent(pi->dev, chans * pi->mcbufsz,
+ pl330->mcode_cpu, pl330->mcode_bus);
+}
+
+static void pl330_del(struct pl330_info *pi)
+{
+ struct pl330_dmac *pl330;
+
+ if (!pi || !pi->pl330_data)
+ return;
+
+ pl330 = pi->pl330_data;
+
+ pl330->state = UNINIT;
+
+ tasklet_kill(&pl330->tasks);
+
+ /* Free DMAC resources */
+ dmac_free_resources(pl330);
+
+ kfree(pl330);
+ pi->pl330_data = NULL;
+}
+
/* forward declaration */
static struct amba_driver pl330_driver;
@@ -234,7 +2320,7 @@ static void pl330_tasklet(unsigned long data)
/* Pick up ripe tomatoes */
list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
if (desc->status == DONE) {
- pch->completed = desc->txd.cookie;
+ dma_cookie_complete(&desc->txd);
list_move_tail(&desc->node, &list);
}
@@ -305,7 +2391,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&pch->lock, flags);
- pch->completed = chan->cookie = 1;
+ dma_cookie_init(chan);
pch->cyclic = false;
pch->pl330_chid = pl330_request_channel(&pdmac->pif);
@@ -340,7 +2426,6 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
/* Mark all desc done */
list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
desc->status = DONE;
- pch->completed = desc->txd.cookie;
list_move_tail(&desc->node, &list);
}
@@ -396,18 +2481,7 @@ static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct dma_pl330_chan *pch = to_pchan(chan);
- dma_cookie_t last_done, last_used;
- int ret;
-
- last_done = pch->completed;
- last_used = chan->cookie;
-
- ret = dma_async_is_complete(cookie, last_done, last_used);
-
- dma_set_tx_state(txstate, last_done, last_used, 0);
-
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
static void pl330_issue_pending(struct dma_chan *chan)
@@ -430,26 +2504,16 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&pch->lock, flags);
/* Assign cookies to all nodes */
- cookie = tx->chan->cookie;
-
while (!list_empty(&last->node)) {
desc = list_entry(last->node.next, struct dma_pl330_desc, node);
- if (++cookie < 0)
- cookie = 1;
- desc->txd.cookie = cookie;
+ dma_cookie_assign(&desc->txd);
list_move_tail(&desc->node, &pch->work_list);
}
- if (++cookie < 0)
- cookie = 1;
- last->txd.cookie = cookie;
-
+ cookie = dma_cookie_assign(&last->txd);
list_add_tail(&last->node, &pch->work_list);
-
- tx->chan->cookie = cookie;
-
spin_unlock_irqrestore(&pch->lock, flags);
return cookie;
@@ -553,6 +2617,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
async_tx_ack(&desc->txd);
desc->req.peri = peri_id ? pch->chan.chan_id : 0;
+ desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;
dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
@@ -621,7 +2686,8 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_transfer_direction direction,
+ void *context)
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -711,7 +2777,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flg)
+ unsigned long flg, void *context)
{
struct dma_pl330_desc *first, *desc = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -829,7 +2895,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(pdmac->clk)) {
dev_err(&adev->dev, "Cannot get operation clock.\n");
ret = -EINVAL;
- goto probe_err1;
+ goto probe_err2;
}
amba_set_drvdata(adev, pdmac);
@@ -843,11 +2909,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = request_irq(irq, pl330_irq_handler, 0,
dev_name(&adev->dev), pi);
if (ret)
- goto probe_err2;
+ goto probe_err3;
ret = pl330_add(pi);
if (ret)
- goto probe_err3;
+ goto probe_err4;
INIT_LIST_HEAD(&pdmac->desc_pool);
spin_lock_init(&pdmac->pool_lock);
@@ -904,7 +2970,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = dma_async_device_register(pd);
if (ret) {
dev_err(&adev->dev, "unable to register DMAC\n");
- goto probe_err4;
+ goto probe_err5;
}
dev_info(&adev->dev,
@@ -917,10 +2983,15 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
-probe_err4:
+probe_err5:
pl330_del(pi);
-probe_err3:
+probe_err4:
free_irq(irq, pi);
+probe_err3:
+#ifndef CONFIG_PM_RUNTIME
+ clk_disable(pdmac->clk);
+#endif
+ clk_put(pdmac->clk);
probe_err2:
iounmap(pi->base);
probe_err1:
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index fc457a7e8832..ced98826684a 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -46,6 +46,7 @@
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include "adma.h"
+#include "../dmaengine.h"
enum ppc_adma_init_code {
PPC_ADMA_INIT_OK = 0,
@@ -1930,7 +1931,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
if (end_of_chain && slot_cnt) {
/* Should wait for ZeroSum completion */
if (cookie > 0)
- chan->completed_cookie = cookie;
+ chan->common.completed_cookie = cookie;
return;
}
@@ -1960,7 +1961,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
BUG_ON(!seen_current);
if (cookie > 0) {
- chan->completed_cookie = cookie;
+ chan->common.completed_cookie = cookie;
pr_debug("\tcompleted cookie %d\n", cookie);
}
@@ -2150,22 +2151,6 @@ static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
}
/**
- * ppc440spe_desc_assign_cookie - assign a cookie
- */
-static dma_cookie_t ppc440spe_desc_assign_cookie(
- struct ppc440spe_adma_chan *chan,
- struct ppc440spe_adma_desc_slot *desc)
-{
- dma_cookie_t cookie = chan->common.cookie;
-
- cookie++;
- if (cookie < 0)
- cookie = 1;
- chan->common.cookie = desc->async_tx.cookie = cookie;
- return cookie;
-}
-
-/**
* ppc440spe_rxor_set_region_data -
*/
static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
@@ -2235,8 +2220,7 @@ static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
slots_per_op = group_start->slots_per_op;
spin_lock_bh(&chan->lock);
-
- cookie = ppc440spe_desc_assign_cookie(chan, sw_desc);
+ cookie = dma_cookie_assign(tx);
if (unlikely(list_empty(&chan->chain))) {
/* first peer */
@@ -3944,28 +3928,16 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct ppc440spe_adma_chan *ppc440spe_chan;
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
enum dma_status ret;
ppc440spe_chan = to_ppc440spe_adma_chan(chan);
- last_used = chan->cookie;
- last_complete = ppc440spe_chan->completed_cookie;
-
- dma_set_tx_state(txstate, last_complete, last_used, 0);
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS)
return ret;
ppc440spe_adma_slot_cleanup(ppc440spe_chan);
- last_used = chan->cookie;
- last_complete = ppc440spe_chan->completed_cookie;
-
- dma_set_tx_state(txstate, last_complete, last_used, 0);
-
- return dma_async_is_complete(cookie, last_complete, last_used);
+ return dma_cookie_status(chan, cookie, txstate);
}
/**
@@ -4050,16 +4022,12 @@ static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
async_tx_ack(&sw_desc->async_tx);
ppc440spe_desc_init_null_xor(group_start);
- cookie = chan->common.cookie;
- cookie++;
- if (cookie <= 1)
- cookie = 2;
+ cookie = dma_cookie_assign(&sw_desc->async_tx);
/* initialize the completed cookie to be less than
* the most recently used cookie
*/
- chan->completed_cookie = cookie - 1;
- chan->common.cookie = sw_desc->async_tx.cookie = cookie;
+ chan->common.completed_cookie = cookie - 1;
/* channel should not be busy */
BUG_ON(ppc440spe_chan_is_busy(chan));
@@ -4529,6 +4497,7 @@ static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev)
INIT_LIST_HEAD(&chan->all_slots);
chan->device = adev;
chan->common.device = &adev->common;
+ dma_cookie_init(&chan->common);
list_add_tail(&chan->common.device_node, &adev->common.channels);
tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
(unsigned long)chan);
diff --git a/drivers/dma/ppc4xx/adma.h b/drivers/dma/ppc4xx/adma.h
index 8ada5a812e3b..26b7a5ed9ac7 100644
--- a/drivers/dma/ppc4xx/adma.h
+++ b/drivers/dma/ppc4xx/adma.h
@@ -81,7 +81,6 @@ struct ppc440spe_adma_device {
* @common: common dmaengine channel object members
* @all_slots: complete domain of slots usable by the channel
* @pending: allows batching of hardware operations
- * @completed_cookie: identifier for the most recently completed operation
* @slots_allocated: records the actual size of the descriptor slot pool
* @hw_chain_inited: h/w descriptor chain initialization flag
* @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs
@@ -99,7 +98,6 @@ struct ppc440spe_adma_chan {
struct list_head all_slots;
struct ppc440spe_adma_desc_slot *last_used;
int pending;
- dma_cookie_t completed_cookie;
int slots_allocated;
int hw_chain_inited;
struct tasklet_struct irq_tasklet;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 812fd76e9c18..19d7a8d3975d 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -30,6 +30,8 @@
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
+
+#include "dmaengine.h"
#include "shdma.h"
/* DMA descriptor control */
@@ -296,13 +298,7 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
else
power_up = false;
- cookie = sh_chan->common.cookie;
- cookie++;
- if (cookie < 0)
- cookie = 1;
-
- sh_chan->common.cookie = cookie;
- tx->cookie = cookie;
+ cookie = dma_cookie_assign(tx);
/* Mark all chunks of this descriptor as submitted, move to the queue */
list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
@@ -673,7 +669,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
{
struct sh_dmae_slave *param;
struct sh_dmae_chan *sh_chan;
@@ -764,12 +761,12 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
cookie = tx->cookie;
if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
- if (sh_chan->completed_cookie != desc->cookie - 1)
+ if (sh_chan->common.completed_cookie != desc->cookie - 1)
dev_dbg(sh_chan->dev,
"Completing cookie %d, expected %d\n",
desc->cookie,
- sh_chan->completed_cookie + 1);
- sh_chan->completed_cookie = desc->cookie;
+ sh_chan->common.completed_cookie + 1);
+ sh_chan->common.completed_cookie = desc->cookie;
}
/* Call callback on the last chunk */
@@ -823,7 +820,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
* Terminating and the loop completed normally: forgive
* uncompleted cookies
*/
- sh_chan->completed_cookie = sh_chan->common.cookie;
+ sh_chan->common.completed_cookie = sh_chan->common.cookie;
spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
@@ -883,23 +880,14 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
enum dma_status status;
unsigned long flags;
sh_dmae_chan_ld_cleanup(sh_chan, false);
- /* First read completed cookie to avoid a skew */
- last_complete = sh_chan->completed_cookie;
- rmb();
- last_used = chan->cookie;
- BUG_ON(last_complete < 0);
- dma_set_tx_state(txstate, last_complete, last_used, 0);
-
spin_lock_irqsave(&sh_chan->desc_lock, flags);
- status = dma_async_is_complete(cookie, last_complete, last_used);
+ status = dma_cookie_status(chan, cookie, txstate);
/*
* If we don't find cookie on the queue, it has been aborted and we have
@@ -1102,6 +1090,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
/* reference struct dma_device */
new_sh_chan->common.device = &shdev->common;
+ dma_cookie_init(&new_sh_chan->common);
new_sh_chan->dev = shdev->common.dev;
new_sh_chan->id = id;
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 2b55a276dc5b..0b1d2c105f02 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -30,7 +30,6 @@ enum dmae_pm_state {
};
struct sh_dmae_chan {
- dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */
struct list_head ld_queue; /* Link descriptors queue */
struct list_head ld_free; /* Link descriptors free */
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 2333810d1688..434ad31174f2 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -18,6 +18,8 @@
#include <linux/of_platform.h>
#include <linux/sirfsoc_dma.h>
+#include "dmaengine.h"
+
#define SIRFSOC_DMA_DESCRIPTORS 16
#define SIRFSOC_DMA_CHANNELS 16
@@ -59,7 +61,6 @@ struct sirfsoc_dma_chan {
struct list_head queued;
struct list_head active;
struct list_head completed;
- dma_cookie_t completed_cookie;
unsigned long happened_cyclic;
unsigned long completed_cyclic;
@@ -208,7 +209,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
/* Free descriptors */
spin_lock_irqsave(&schan->lock, flags);
list_splice_tail_init(&list, &schan->free);
- schan->completed_cookie = last_cookie;
+ schan->chan.completed_cookie = last_cookie;
spin_unlock_irqrestore(&schan->lock, flags);
} else {
/* for cyclic channel, desc is always in active list */
@@ -258,13 +259,7 @@ static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
/* Move descriptor to queue */
list_move_tail(&sdesc->node, &schan->queued);
- /* Update cookie */
- cookie = schan->chan.cookie + 1;
- if (cookie <= 0)
- cookie = 1;
-
- schan->chan.cookie = cookie;
- sdesc->desc.cookie = cookie;
+ cookie = dma_cookie_assign(txd);
spin_unlock_irqrestore(&schan->lock, flags);
@@ -414,16 +409,13 @@ sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
{
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
unsigned long flags;
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
+ enum dma_status ret;
spin_lock_irqsave(&schan->lock, flags);
- last_used = schan->chan.cookie;
- last_complete = schan->completed_cookie;
+ ret = dma_cookie_status(chan, cookie, txstate);
spin_unlock_irqrestore(&schan->lock, flags);
- dma_set_tx_state(txstate, last_complete, last_used, 0);
- return dma_async_is_complete(cookie, last_complete, last_used);
+ return ret;
}
static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
@@ -497,7 +489,7 @@ err_dir:
static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction)
+ enum dma_transfer_direction direction, void *context)
{
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
struct sirfsoc_dma_desc *sdesc = NULL;
@@ -635,8 +627,7 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
schan = &sdma->channels[i];
schan->chan.device = dma;
- schan->chan.cookie = 1;
- schan->completed_cookie = schan->chan.cookie;
+ dma_cookie_init(&schan->chan);
INIT_LIST_HEAD(&schan->free);
INIT_LIST_HEAD(&schan->prepared);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index cc5ecbc067a3..bdd41d4bfa8d 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -21,6 +21,7 @@
#include <plat/ste_dma40.h>
+#include "dmaengine.h"
#include "ste_dma40_ll.h"
#define D40_NAME "dma40"
@@ -220,8 +221,6 @@ struct d40_base;
*
* @lock: A spinlock to protect this struct.
* @log_num: The logical number, if any of this channel.
- * @completed: Starts with 1, after first interrupt it is set to dma engine's
- * current cookie.
* @pending_tx: The number of pending transfers. Used between interrupt handler
* and tasklet.
* @busy: Set to true when transfer is ongoing on this channel.
@@ -250,8 +249,6 @@ struct d40_base;
struct d40_chan {
spinlock_t lock;
int log_num;
- /* ID of the most recent completed transfer */
- int completed;
int pending_tx;
bool busy;
struct d40_phy_res *phy_chan;
@@ -1223,21 +1220,14 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
chan);
struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
unsigned long flags;
+ dma_cookie_t cookie;
spin_lock_irqsave(&d40c->lock, flags);
-
- d40c->chan.cookie++;
-
- if (d40c->chan.cookie < 0)
- d40c->chan.cookie = 1;
-
- d40d->txd.cookie = d40c->chan.cookie;
-
+ cookie = dma_cookie_assign(tx);
d40_desc_queue(d40c, d40d);
-
spin_unlock_irqrestore(&d40c->lock, flags);
- return tx->cookie;
+ return cookie;
}
static int d40_start(struct d40_chan *d40c)
@@ -1357,7 +1347,7 @@ static void dma_tasklet(unsigned long data)
goto err;
if (!d40d->cyclic)
- d40c->completed = d40d->txd.cookie;
+ dma_cookie_complete(&d40d->txd);
/*
* If terminating a channel pending_tx is set to zero.
@@ -2182,7 +2172,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
bool is_free_phy;
spin_lock_irqsave(&d40c->lock, flags);
- d40c->completed = chan->cookie = 1;
+ dma_cookie_init(chan);
/* If no dma configuration is set use default configuration (memcpy) */
if (!d40c->configured) {
@@ -2299,7 +2289,8 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl,
unsigned int sg_len,
enum dma_transfer_direction direction,
- unsigned long dma_flags)
+ unsigned long dma_flags,
+ void *context)
{
if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
return NULL;
@@ -2310,7 +2301,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction)
+ enum dma_transfer_direction direction, void *context)
{
unsigned int periods = buf_len / period_len;
struct dma_async_tx_descriptor *txd;
@@ -2342,25 +2333,19 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
- int ret;
+ enum dma_status ret;
if (d40c->phy_chan == NULL) {
chan_err(d40c, "Cannot read status of unallocated channel\n");
return -EINVAL;
}
- last_complete = d40c->completed;
- last_used = chan->cookie;
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret != DMA_SUCCESS)
+ dma_set_residue(txstate, stedma40_residue(chan));
if (d40_is_paused(d40c))
ret = DMA_PAUSED;
- else
- ret = dma_async_is_complete(cookie, last_complete, last_used);
-
- dma_set_tx_state(txstate, last_complete, last_used,
- stedma40_residue(chan));
return ret;
}
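Nothing changes for dmaengine clients: a slave driver still asks the channel for completion state and gets the same dma_tx_state back, with the residue now filled in by dma_set_residue() only when the transfer is not yet done. A minimal, hypothetical status check on the client side (assuming the dmaengine_tx_status() wrapper):

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);	/* ends up in d40_tx_status() here */
	if (status == DMA_SUCCESS)
		pr_debug("transfer %d complete\n", cookie);
	else
		pr_debug("transfer %d in flight, %u bytes left\n",
			 cookie, state.residue);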
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index a6f9c1684a0f..4e0dff59901d 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -31,6 +31,8 @@
#include <linux/timb_dma.h>
+#include "dmaengine.h"
+
#define DRIVER_NAME "timb-dma"
/* Global DMA registers */
@@ -84,7 +86,6 @@ struct timb_dma_chan {
especially the lists and descriptors,
from races between the tasklet and calls
from above */
- dma_cookie_t last_completed_cookie;
bool ongoing;
struct list_head active_list;
struct list_head queue;
@@ -284,7 +285,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
else
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
*/
- td_chan->last_completed_cookie = txd->cookie;
+ dma_cookie_complete(txd);
td_chan->ongoing = false;
callback = txd->callback;
@@ -349,12 +350,7 @@ static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
dma_cookie_t cookie;
spin_lock_bh(&td_chan->lock);
-
- cookie = txd->chan->cookie;
- if (++cookie < 0)
- cookie = 1;
- txd->chan->cookie = cookie;
- txd->cookie = cookie;
+ cookie = dma_cookie_assign(txd);
if (list_empty(&td_chan->active_list)) {
dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
@@ -481,8 +477,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan)
}
spin_lock_bh(&td_chan->lock);
- td_chan->last_completed_cookie = 1;
- chan->cookie = 1;
+ dma_cookie_init(chan);
spin_unlock_bh(&td_chan->lock);
return 0;
@@ -515,24 +510,13 @@ static void td_free_chan_resources(struct dma_chan *chan)
static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct timb_dma_chan *td_chan =
- container_of(chan, struct timb_dma_chan, chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
- int ret;
+ enum dma_status ret;
dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
- last_complete = td_chan->last_completed_cookie;
- last_used = chan->cookie;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
-
- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ ret = dma_cookie_status(chan, cookie, txstate);
- dev_dbg(chan2dev(chan),
- "%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
- __func__, ret, last_complete, last_used);
+ dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);
return ret;
}
@@ -558,7 +542,8 @@ static void td_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
{
struct timb_dma_chan *td_chan =
container_of(chan, struct timb_dma_chan, chan);
@@ -766,7 +751,7 @@ static int __devinit td_probe(struct platform_device *pdev)
}
td_chan->chan.device = &td->dma;
- td_chan->chan.cookie = 1;
+ dma_cookie_init(&td_chan->chan);
spin_lock_init(&td_chan->lock);
INIT_LIST_HEAD(&td_chan->active_list);
INIT_LIST_HEAD(&td_chan->queue);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 6122c364cf11..913f55c76c99 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -15,6 +15,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
+
+#include "dmaengine.h"
#include "txx9dmac.h"
static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
@@ -279,21 +281,6 @@ static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
}
}
-/* Called with dc->lock held and bh disabled */
-static dma_cookie_t
-txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc)
-{
- dma_cookie_t cookie = dc->chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- dc->chan.cookie = cookie;
- desc->txd.cookie = cookie;
-
- return cookie;
-}
-
/*----------------------------------------------------------------------*/
static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
@@ -424,7 +411,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
txd->cookie, desc);
- dc->completed = txd->cookie;
+ dma_cookie_complete(txd);
callback = txd->callback;
param = txd->callback_param;
@@ -738,7 +725,7 @@ static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
dma_cookie_t cookie;
spin_lock_bh(&dc->lock);
- cookie = txx9dmac_assign_cookie(dc, desc);
+ cookie = dma_cookie_assign(tx);
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
desc->txd.cookie, desc);
@@ -846,7 +833,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags)
+ unsigned long flags, void *context)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
struct txx9dmac_dev *ddev = dc->ddev;
@@ -972,27 +959,17 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
- int ret;
+ enum dma_status ret;
- last_complete = dc->completed;
- last_used = chan->cookie;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS) {
spin_lock_bh(&dc->lock);
txx9dmac_scan_descriptors(dc);
spin_unlock_bh(&dc->lock);
- last_complete = dc->completed;
- last_used = chan->cookie;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ ret = dma_cookie_status(chan, cookie, txstate);
}
- dma_set_tx_state(txstate, last_complete, last_used, 0);
-
return ret;
}
@@ -1057,7 +1034,7 @@ static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
return -EIO;
}
- dc->completed = chan->cookie = 1;
+ dma_cookie_init(chan);
dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
txx9dmac_chan_set_SMPCHN(dc);
@@ -1186,7 +1163,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
dc->ddev->chan[ch] = dc;
dc->chan.device = &dc->dma;
list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
- dc->chan.cookie = dc->completed = 1;
+ dma_cookie_init(&dc->chan);
if (is_dmac64(dc))
dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h
index 365d42366b9f..f5a760598882 100644
--- a/drivers/dma/txx9dmac.h
+++ b/drivers/dma/txx9dmac.h
@@ -172,7 +172,6 @@ struct txx9dmac_chan {
spinlock_t lock;
/* these other elements are all protected by lock */
- dma_cookie_t completed;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index b2d3ee1d183a..5689ce62fd81 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -22,6 +22,8 @@
#include <linux/syscore_ops.h>
#include <linux/slab.h>
+#include <mach/irqs.h>
+
/*
* We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
* one set of registers. The register offsets are organized below:
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0694e170a338..1a7559b59997 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -467,6 +467,10 @@ static int i915_drm_freeze(struct drm_device *dev)
/* Modeset on resume, not lid events */
dev_priv->modeset_on_lid = 0;
+ console_lock();
+ intel_fbdev_set_suspend(dev, 1);
+ console_unlock();
+
return 0;
}
@@ -539,6 +543,9 @@ static int i915_drm_thaw(struct drm_device *dev)
dev_priv->modeset_on_lid = 0;
+ console_lock();
+ intel_fbdev_set_suspend(dev, 0);
+ console_unlock();
return error;
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9cec6c3937fa..5a14149b3794 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -382,7 +382,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
-
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 2d8766978388..19ecd78b8a2c 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -254,6 +254,16 @@ void intel_fbdev_fini(struct drm_device *dev)
kfree(dev_priv->fbdev);
dev_priv->fbdev = NULL;
}
+
+void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+ fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+}
+
MODULE_LICENSE("GPL and additional rights");
void intel_fb_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/input/input-compat.c b/drivers/input/input-compat.c
index e46a86776a6b..64ca7113ff28 100644
--- a/drivers/input/input-compat.c
+++ b/drivers/input/input-compat.c
@@ -17,7 +17,7 @@
int input_event_from_user(const char __user *buffer,
struct input_event *event)
{
- if (INPUT_COMPAT_TEST) {
+ if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
struct input_event_compat compat_event;
if (copy_from_user(&compat_event, buffer,
@@ -41,7 +41,7 @@ int input_event_from_user(const char __user *buffer,
int input_event_to_user(char __user *buffer,
const struct input_event *event)
{
- if (INPUT_COMPAT_TEST) {
+ if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
struct input_event_compat compat_event;
compat_event.time.tv_sec = event->time.tv_sec;
diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h
index 22be27b424de..148f66fe3205 100644
--- a/drivers/input/input-compat.h
+++ b/drivers/input/input-compat.h
@@ -67,7 +67,7 @@ struct ff_effect_compat {
static inline size_t input_event_size(void)
{
- return INPUT_COMPAT_TEST ?
+ return (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) ?
sizeof(struct input_event_compat) : sizeof(struct input_event);
}
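The new COMPAT_USE_64BIT_TIME test matters for the x32 ABI: such tasks are compat in the pointer sense but use the native 64-bit time_t, so their struct input_event already has the native layout and must not be run through the compat conversion. A simplified sketch of the two layouts these helpers translate between (field details may differ):

	struct input_event {			/* native; also what x32 tasks see */
		struct timeval time;		/* 64-bit tv_sec/tv_usec on a 64-bit kernel */
		__u16 type;
		__u16 code;
		__s32 value;
	};

	struct input_event_compat {		/* legacy 32-bit tasks only */
		struct compat_timeval time;	/* 32-bit tv_sec/tv_usec */
		__u16 type;
		__u16 code;
		__s32 value;
	};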
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index 24044dacbf70..c65b5fa69f1e 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -107,6 +107,9 @@ static int __init amijoy_init(void)
int i, j;
int err;
+ if (!MACH_IS_AMIGA)
+ return -ENODEV;
+
for (i = 0; i < 2; i++) {
if (!amijoy[i])
continue;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index ed1ed469d085..62bfce468f9f 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -28,14 +28,18 @@
#include <linux/gpio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
+#include <linux/spinlock.h>
struct gpio_button_data {
- struct gpio_keys_button *button;
+ const struct gpio_keys_button *button;
struct input_dev *input;
struct timer_list timer;
struct work_struct work;
- int timer_debounce; /* in msecs */
+ unsigned int timer_debounce; /* in msecs */
+ unsigned int irq;
+ spinlock_t lock;
bool disabled;
+ bool key_pressed;
};
struct gpio_keys_drvdata {
@@ -114,7 +118,7 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
/*
* Disable IRQ and possible debouncing timer.
*/
- disable_irq(gpio_to_irq(bdata->button->gpio));
+ disable_irq(bdata->irq);
if (bdata->timer_debounce)
del_timer_sync(&bdata->timer);
@@ -135,7 +139,7 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
static void gpio_keys_enable_button(struct gpio_button_data *bdata)
{
if (bdata->disabled) {
- enable_irq(gpio_to_irq(bdata->button->gpio));
+ enable_irq(bdata->irq);
bdata->disabled = false;
}
}
@@ -195,7 +199,7 @@ static ssize_t gpio_keys_attr_show_helper(struct gpio_keys_drvdata *ddata,
* @type: button type (%EV_KEY, %EV_SW)
*
* This function parses stringified bitmap from @buf and disables/enables
- * GPIO buttons accordinly. Returns 0 on success and negative error
+ * GPIO buttons accordingly. Returns 0 on success and negative error
* on failure.
*/
static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
@@ -320,9 +324,9 @@ static struct attribute_group gpio_keys_attr_group = {
.attrs = gpio_keys_attrs,
};
-static void gpio_keys_report_event(struct gpio_button_data *bdata)
+static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
{
- struct gpio_keys_button *button = bdata->button;
+ const struct gpio_keys_button *button = bdata->button;
struct input_dev *input = bdata->input;
unsigned int type = button->type ?: EV_KEY;
int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low;
@@ -336,27 +340,26 @@ static void gpio_keys_report_event(struct gpio_button_data *bdata)
input_sync(input);
}
-static void gpio_keys_work_func(struct work_struct *work)
+static void gpio_keys_gpio_work_func(struct work_struct *work)
{
struct gpio_button_data *bdata =
container_of(work, struct gpio_button_data, work);
- gpio_keys_report_event(bdata);
+ gpio_keys_gpio_report_event(bdata);
}
-static void gpio_keys_timer(unsigned long _data)
+static void gpio_keys_gpio_timer(unsigned long _data)
{
- struct gpio_button_data *data = (struct gpio_button_data *)_data;
+ struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
- schedule_work(&data->work);
+ schedule_work(&bdata->work);
}
-static irqreturn_t gpio_keys_isr(int irq, void *dev_id)
+static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
{
struct gpio_button_data *bdata = dev_id;
- struct gpio_keys_button *button = bdata->button;
- BUG_ON(irq != gpio_to_irq(button->gpio));
+ BUG_ON(irq != bdata->irq);
if (bdata->timer_debounce)
mod_timer(&bdata->timer,
@@ -367,50 +370,133 @@ static irqreturn_t gpio_keys_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void gpio_keys_irq_timer(unsigned long _data)
+{
+ struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
+ struct input_dev *input = bdata->input;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bdata->lock, flags);
+ if (bdata->key_pressed) {
+ input_event(input, EV_KEY, bdata->button->code, 0);
+ input_sync(input);
+ bdata->key_pressed = false;
+ }
+ spin_unlock_irqrestore(&bdata->lock, flags);
+}
+
+static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
+{
+ struct gpio_button_data *bdata = dev_id;
+ const struct gpio_keys_button *button = bdata->button;
+ struct input_dev *input = bdata->input;
+ unsigned long flags;
+
+ BUG_ON(irq != bdata->irq);
+
+ spin_lock_irqsave(&bdata->lock, flags);
+
+ if (!bdata->key_pressed) {
+ input_event(input, EV_KEY, button->code, 1);
+ input_sync(input);
+
+ if (!bdata->timer_debounce) {
+ input_event(input, EV_KEY, button->code, 0);
+ input_sync(input);
+ goto out;
+ }
+
+ bdata->key_pressed = true;
+ }
+
+ if (bdata->timer_debounce)
+ mod_timer(&bdata->timer,
+ jiffies + msecs_to_jiffies(bdata->timer_debounce));
+out:
+ spin_unlock_irqrestore(&bdata->lock, flags);
+ return IRQ_HANDLED;
+}
+
static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
+ struct input_dev *input,
struct gpio_button_data *bdata,
- struct gpio_keys_button *button)
+ const struct gpio_keys_button *button)
{
const char *desc = button->desc ? button->desc : "gpio_keys";
struct device *dev = &pdev->dev;
+ irq_handler_t isr;
unsigned long irqflags;
int irq, error;
- setup_timer(&bdata->timer, gpio_keys_timer, (unsigned long)bdata);
- INIT_WORK(&bdata->work, gpio_keys_work_func);
+ bdata->input = input;
+ bdata->button = button;
+ spin_lock_init(&bdata->lock);
- error = gpio_request(button->gpio, desc);
- if (error < 0) {
- dev_err(dev, "failed to request GPIO %d, error %d\n",
- button->gpio, error);
- goto fail2;
- }
+ if (gpio_is_valid(button->gpio)) {
- error = gpio_direction_input(button->gpio);
- if (error < 0) {
- dev_err(dev, "failed to configure"
- " direction for GPIO %d, error %d\n",
- button->gpio, error);
- goto fail3;
- }
+ error = gpio_request(button->gpio, desc);
+ if (error < 0) {
+ dev_err(dev, "Failed to request GPIO %d, error %d\n",
+ button->gpio, error);
+ return error;
+ }
- if (button->debounce_interval) {
- error = gpio_set_debounce(button->gpio,
- button->debounce_interval * 1000);
- /* use timer if gpiolib doesn't provide debounce */
- if (error < 0)
- bdata->timer_debounce = button->debounce_interval;
- }
+ error = gpio_direction_input(button->gpio);
+ if (error < 0) {
+ dev_err(dev,
+ "Failed to configure direction for GPIO %d, error %d\n",
+ button->gpio, error);
+ goto fail;
+ }
- irq = gpio_to_irq(button->gpio);
- if (irq < 0) {
- error = irq;
- dev_err(dev, "Unable to get irq number for GPIO %d, error %d\n",
- button->gpio, error);
- goto fail3;
+ if (button->debounce_interval) {
+ error = gpio_set_debounce(button->gpio,
+ button->debounce_interval * 1000);
+ /* use timer if gpiolib doesn't provide debounce */
+ if (error < 0)
+ bdata->timer_debounce =
+ button->debounce_interval;
+ }
+
+ irq = gpio_to_irq(button->gpio);
+ if (irq < 0) {
+ error = irq;
+ dev_err(dev,
+ "Unable to get irq number for GPIO %d, error %d\n",
+ button->gpio, error);
+ goto fail;
+ }
+ bdata->irq = irq;
+
+ INIT_WORK(&bdata->work, gpio_keys_gpio_work_func);
+ setup_timer(&bdata->timer,
+ gpio_keys_gpio_timer, (unsigned long)bdata);
+
+ isr = gpio_keys_gpio_isr;
+ irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
+ } else {
+ if (!button->irq) {
+ dev_err(dev, "No IRQ specified\n");
+ return -EINVAL;
+ }
+ bdata->irq = button->irq;
+
+ if (button->type && button->type != EV_KEY) {
+ dev_err(dev, "Only EV_KEY allowed for IRQ buttons.\n");
+ return -EINVAL;
+ }
+
+ bdata->timer_debounce = button->debounce_interval;
+ setup_timer(&bdata->timer,
+ gpio_keys_irq_timer, (unsigned long)bdata);
+
+ isr = gpio_keys_irq_isr;
+ irqflags = 0;
}
- irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+ input_set_capability(input, button->type ?: EV_KEY, button->code);
+
/*
* If platform has specified that the button can be disabled,
* we don't want it to share the interrupt line.
@@ -418,18 +504,19 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
if (!button->can_disable)
irqflags |= IRQF_SHARED;
- error = request_threaded_irq(irq, NULL, gpio_keys_isr, irqflags, desc, bdata);
+ error = request_any_context_irq(bdata->irq, isr, irqflags, desc, bdata);
if (error < 0) {
dev_err(dev, "Unable to claim irq %d; error %d\n",
- irq, error);
- goto fail3;
+ bdata->irq, error);
+ goto fail;
}
return 0;
-fail3:
- gpio_free(button->gpio);
-fail2:
+fail:
+ if (gpio_is_valid(button->gpio))
+ gpio_free(button->gpio);
+
return error;
}
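With the branch added above, a board can describe a key that has no readable GPIO at all, only an interrupt line. A hypothetical platform-data snippet (assuming struct gpio_keys_button has gained the irq field this code reads; names and numbers are made up):

	static struct gpio_keys_button board_buttons[] = {
		{
			/* conventional GPIO-backed button */
			.code			= KEY_POWER,
			.gpio			= 17,		/* hypothetical GPIO */
			.active_low		= 1,
			.desc			= "power",
			.type			= EV_KEY,
			.debounce_interval	= 20,
			.wakeup			= 1,
		},
		{
			/* IRQ-only button: invalid GPIO selects the new path */
			.code			= KEY_VOLUMEUP,
			.gpio			= -1,
			.irq			= 42,		/* hypothetical IRQ */
			.desc			= "volume up",
			.type			= EV_KEY,	/* only EV_KEY is accepted */
			.debounce_interval	= 50,
		},
	};

	static struct gpio_keys_platform_data board_keys_pdata = {
		.buttons	= board_buttons,
		.nbuttons	= ARRAY_SIZE(board_buttons),
	};

Since there is no level to read back for the second button, its release event is synthesized by gpio_keys_irq_timer() once debounce_interval milliseconds have passed.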
@@ -547,9 +634,19 @@ static int gpio_keys_get_devtree_pdata(struct device *dev,
#endif
+static void gpio_remove_key(struct gpio_button_data *bdata)
+{
+ free_irq(bdata->irq, bdata);
+ if (bdata->timer_debounce)
+ del_timer_sync(&bdata->timer);
+ cancel_work_sync(&bdata->work);
+ if (gpio_is_valid(bdata->button->gpio))
+ gpio_free(bdata->button->gpio);
+}
+
static int __devinit gpio_keys_probe(struct platform_device *pdev)
{
- struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
+ const struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
struct gpio_keys_drvdata *ddata;
struct device *dev = &pdev->dev;
struct gpio_keys_platform_data alt_pdata;
@@ -599,21 +696,15 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
__set_bit(EV_REP, input->evbit);
for (i = 0; i < pdata->nbuttons; i++) {
- struct gpio_keys_button *button = &pdata->buttons[i];
+ const struct gpio_keys_button *button = &pdata->buttons[i];
struct gpio_button_data *bdata = &ddata->data[i];
- unsigned int type = button->type ?: EV_KEY;
-
- bdata->input = input;
- bdata->button = button;
- error = gpio_keys_setup_key(pdev, bdata, button);
+ error = gpio_keys_setup_key(pdev, input, bdata, button);
if (error)
goto fail2;
if (button->wakeup)
wakeup = 1;
-
- input_set_capability(input, type, button->code);
}
error = sysfs_create_group(&pdev->dev.kobj, &gpio_keys_attr_group);
@@ -630,9 +721,12 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
goto fail3;
}
- /* get current state of buttons */
- for (i = 0; i < pdata->nbuttons; i++)
- gpio_keys_report_event(&ddata->data[i]);
+ /* get current state of buttons that are connected to GPIOs */
+ for (i = 0; i < pdata->nbuttons; i++) {
+ struct gpio_button_data *bdata = &ddata->data[i];
+ if (gpio_is_valid(bdata->button->gpio))
+ gpio_keys_gpio_report_event(bdata);
+ }
input_sync(input);
device_init_wakeup(&pdev->dev, wakeup);
@@ -642,13 +736,8 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
fail3:
sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group);
fail2:
- while (--i >= 0) {
- free_irq(gpio_to_irq(pdata->buttons[i].gpio), &ddata->data[i]);
- if (ddata->data[i].timer_debounce)
- del_timer_sync(&ddata->data[i].timer);
- cancel_work_sync(&ddata->data[i].work);
- gpio_free(pdata->buttons[i].gpio);
- }
+ while (--i >= 0)
+ gpio_remove_key(&ddata->data[i]);
platform_set_drvdata(pdev, NULL);
fail1:
@@ -671,14 +760,8 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
- for (i = 0; i < ddata->n_buttons; i++) {
- int irq = gpio_to_irq(ddata->data[i].button->gpio);
- free_irq(irq, &ddata->data[i]);
- if (ddata->data[i].timer_debounce)
- del_timer_sync(&ddata->data[i].timer);
- cancel_work_sync(&ddata->data[i].work);
- gpio_free(ddata->data[i].button->gpio);
- }
+ for (i = 0; i < ddata->n_buttons; i++)
+ gpio_remove_key(&ddata->data[i]);
input_unregister_device(input);
@@ -703,11 +786,9 @@ static int gpio_keys_suspend(struct device *dev)
if (device_may_wakeup(dev)) {
for (i = 0; i < ddata->n_buttons; i++) {
- struct gpio_keys_button *button = ddata->data[i].button;
- if (button->wakeup) {
- int irq = gpio_to_irq(button->gpio);
- enable_irq_wake(irq);
- }
+ struct gpio_button_data *bdata = &ddata->data[i];
+ if (bdata->button->wakeup)
+ enable_irq_wake(bdata->irq);
}
}
@@ -720,14 +801,12 @@ static int gpio_keys_resume(struct device *dev)
int i;
for (i = 0; i < ddata->n_buttons; i++) {
+ struct gpio_button_data *bdata = &ddata->data[i];
+ if (bdata->button->wakeup && device_may_wakeup(dev))
+ disable_irq_wake(bdata->irq);
- struct gpio_keys_button *button = ddata->data[i].button;
- if (button->wakeup && device_may_wakeup(dev)) {
- int irq = gpio_to_irq(button->gpio);
- disable_irq_wake(irq);
- }
-
- gpio_keys_report_event(&ddata->data[i]);
+ if (gpio_is_valid(bdata->button->gpio))
+ gpio_keys_gpio_report_event(bdata);
}
input_sync(ddata->input);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 21c42f852343..fe4ac95ca6c8 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -630,6 +630,7 @@ tegra_kbc_dt_parse_pdata(struct platform_device *pdev)
if (!np)
return NULL;
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 2a77a52d2e62..a977bfaa6821 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -2,7 +2,7 @@
* Finger Sensing Pad PS/2 mouse driver.
*
* Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2012 Tai-hwa Liang, Sentelic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/input.h>
+#include <linux/input/mt.h>
#include <linux/ctype.h>
#include <linux/libps2.h>
#include <linux/serio.h>
@@ -36,6 +37,9 @@
#define FSP_CMD_TIMEOUT 200
#define FSP_CMD_TIMEOUT2 30
+#define GET_ABS_X(packet) ((packet[1] << 2) | ((packet[3] >> 2) & 0x03))
+#define GET_ABS_Y(packet) ((packet[2] << 2) | (packet[3] & 0x03))
+
/** Driver version. */
static const char fsp_drv_ver[] = "1.0.0-K";
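GET_ABS_X/GET_ABS_Y simply reassemble 10-bit absolute coordinates that the firmware spreads over packet bytes 1-3. With made-up packet bytes the arithmetic works out as:

	/* hypothetical bytes: packet[1] = 0xA5, packet[2] = 0x3C, packet[3] = 0x0B */
	/* GET_ABS_X = (0xA5 << 2) | ((0x0B >> 2) & 0x03) = 660 | 2 = 662 */
	/* GET_ABS_Y = (0x3C << 2) |  (0x0B       & 0x03) = 240 | 3 = 243 */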
@@ -128,8 +132,9 @@ static int fsp_reg_read(struct psmouse *psmouse, int reg_addr, int *reg_val)
out:
ps2_end_command(ps2dev);
psmouse_activate(psmouse);
- dev_dbg(&ps2dev->serio->dev, "READ REG: 0x%02x is 0x%02x (rc = %d)\n",
- reg_addr, *reg_val, rc);
+ psmouse_dbg(psmouse,
+ "READ REG: 0x%02x is 0x%02x (rc = %d)\n",
+ reg_addr, *reg_val, rc);
return rc;
}
@@ -179,8 +184,9 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val)
out:
ps2_end_command(ps2dev);
- dev_dbg(&ps2dev->serio->dev, "WRITE REG: 0x%02x to 0x%02x (rc = %d)\n",
- reg_addr, reg_val, rc);
+ psmouse_dbg(psmouse,
+ "WRITE REG: 0x%02x to 0x%02x (rc = %d)\n",
+ reg_addr, reg_val, rc);
return rc;
}
@@ -237,8 +243,9 @@ static int fsp_page_reg_read(struct psmouse *psmouse, int *reg_val)
out:
ps2_end_command(ps2dev);
psmouse_activate(psmouse);
- dev_dbg(&ps2dev->serio->dev, "READ PAGE REG: 0x%02x (rc = %d)\n",
- *reg_val, rc);
+ psmouse_dbg(psmouse,
+ "READ PAGE REG: 0x%02x (rc = %d)\n",
+ *reg_val, rc);
return rc;
}
@@ -274,8 +281,9 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val)
out:
ps2_end_command(ps2dev);
- dev_dbg(&ps2dev->serio->dev, "WRITE PAGE REG: to 0x%02x (rc = %d)\n",
- reg_val, rc);
+ psmouse_dbg(psmouse,
+ "WRITE PAGE REG: to 0x%02x (rc = %d)\n",
+ reg_val, rc);
return rc;
}
@@ -319,7 +327,7 @@ static int fsp_opc_tag_enable(struct psmouse *psmouse, bool enable)
int res = 0;
if (fsp_reg_read(psmouse, FSP_REG_OPC_QDOWN, &v) == -1) {
- dev_err(&psmouse->ps2dev.serio->dev, "Unable get OPC state.\n");
+ psmouse_err(psmouse, "Unable get OPC state.\n");
return -EIO;
}
@@ -336,8 +344,7 @@ static int fsp_opc_tag_enable(struct psmouse *psmouse, bool enable)
}
if (res != 0) {
- dev_err(&psmouse->ps2dev.serio->dev,
- "Unable to enable OPC tag.\n");
+ psmouse_err(psmouse, "Unable to enable OPC tag.\n");
res = -EIO;
}
@@ -615,18 +622,40 @@ static struct attribute_group fsp_attribute_group = {
.attrs = fsp_attributes,
};
-#ifdef FSP_DEBUG
-static void fsp_packet_debug(unsigned char packet[])
+#ifdef FSP_DEBUG
+static void fsp_packet_debug(struct psmouse *psmouse, unsigned char packet[])
{
static unsigned int ps2_packet_cnt;
static unsigned int ps2_last_second;
unsigned int jiffies_msec;
+ const char *packet_type = "UNKNOWN";
+ unsigned short abs_x = 0, abs_y = 0;
+
+ /* Interpret & dump the packet data. */
+ switch (packet[0] >> FSP_PKT_TYPE_SHIFT) {
+ case FSP_PKT_TYPE_ABS:
+ packet_type = "Absolute";
+ abs_x = GET_ABS_X(packet);
+ abs_y = GET_ABS_Y(packet);
+ break;
+ case FSP_PKT_TYPE_NORMAL:
+ packet_type = "Normal";
+ break;
+ case FSP_PKT_TYPE_NOTIFY:
+ packet_type = "Notify";
+ break;
+ case FSP_PKT_TYPE_NORMAL_OPC:
+ packet_type = "Normal-OPC";
+ break;
+ }
ps2_packet_cnt++;
jiffies_msec = jiffies_to_msecs(jiffies);
psmouse_dbg(psmouse,
- "%08dms PS/2 packets: %02x, %02x, %02x, %02x\n",
- jiffies_msec, packet[0], packet[1], packet[2], packet[3]);
+ "%08dms %s packets: %02x, %02x, %02x, %02x; "
+ "abs_x: %d, abs_y: %d\n",
+ jiffies_msec, packet_type,
+ packet[0], packet[1], packet[2], packet[3], abs_x, abs_y);
if (jiffies_msec - ps2_last_second > 1000) {
psmouse_dbg(psmouse, "PS/2 packets/sec = %d\n", ps2_packet_cnt);
@@ -635,17 +664,29 @@ static void fsp_packet_debug(unsigned char packet[])
}
}
#else
-static void fsp_packet_debug(unsigned char packet[])
+static void fsp_packet_debug(struct psmouse *psmouse, unsigned char packet[])
{
}
#endif
+static void fsp_set_slot(struct input_dev *dev, int slot, bool active,
+ unsigned int x, unsigned int y)
+{
+ input_mt_slot(dev, slot);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
+ if (active) {
+ input_report_abs(dev, ABS_MT_POSITION_X, x);
+ input_report_abs(dev, ABS_MT_POSITION_Y, y);
+ }
+}
+
static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
{
struct input_dev *dev = psmouse->dev;
struct fsp_data *ad = psmouse->private;
unsigned char *packet = psmouse->packet;
unsigned char button_status = 0, lscroll = 0, rscroll = 0;
+ unsigned short abs_x, abs_y, fgrs = 0;
int rel_x, rel_y;
if (psmouse->pktcnt < 4)
@@ -655,16 +696,76 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
* Full packet accumulated, process it
*/
+ fsp_packet_debug(psmouse, packet);
+
switch (psmouse->packet[0] >> FSP_PKT_TYPE_SHIFT) {
case FSP_PKT_TYPE_ABS:
- dev_warn(&psmouse->ps2dev.serio->dev,
- "Unexpected absolute mode packet, ignored.\n");
+ abs_x = GET_ABS_X(packet);
+ abs_y = GET_ABS_Y(packet);
+
+ if (packet[0] & FSP_PB0_MFMC) {
+ /*
+ * MFMC packet: assume that there are two fingers on
+ * pad
+ */
+ fgrs = 2;
+
+ /* MFMC packet */
+ if (packet[0] & FSP_PB0_MFMC_FGR2) {
+ /* 2nd finger */
+ if (ad->last_mt_fgr == 2) {
+ /*
+ * workaround for buggy firmware
+ * which doesn't clear MFMC bit if
+ * the 1st finger is up
+ */
+ fgrs = 1;
+ fsp_set_slot(dev, 0, false, 0, 0);
+ }
+ ad->last_mt_fgr = 2;
+
+ fsp_set_slot(dev, 1, fgrs == 2, abs_x, abs_y);
+ } else {
+ /* 1st finger */
+ if (ad->last_mt_fgr == 1) {
+ /*
+ * workaround for buggy firmware
+ * which doesn't clear MFMC bit if
+ * the 2nd finger is up
+ */
+ fgrs = 1;
+ fsp_set_slot(dev, 1, false, 0, 0);
+ }
+ ad->last_mt_fgr = 1;
+ fsp_set_slot(dev, 0, fgrs != 0, abs_x, abs_y);
+ }
+ } else {
+ /* SFAC packet */
+
+ /* no multi-finger information */
+ ad->last_mt_fgr = 0;
+
+ if (abs_x != 0 && abs_y != 0)
+ fgrs = 1;
+
+ fsp_set_slot(dev, 0, fgrs > 0, abs_x, abs_y);
+ fsp_set_slot(dev, 1, false, 0, 0);
+ }
+ if (fgrs > 0) {
+ input_report_abs(dev, ABS_X, abs_x);
+ input_report_abs(dev, ABS_Y, abs_y);
+ }
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ input_report_key(dev, BTN_TOUCH, fgrs);
+ input_report_key(dev, BTN_TOOL_FINGER, fgrs == 1);
+ input_report_key(dev, BTN_TOOL_DOUBLETAP, fgrs == 2);
break;
case FSP_PKT_TYPE_NORMAL_OPC:
/* on-pad click, filter it if necessary */
if ((ad->flags & FSPDRV_FLAG_EN_OPC) != FSPDRV_FLAG_EN_OPC)
- packet[0] &= ~BIT(0);
+ packet[0] &= ~FSP_PB0_LBTN;
/* fall through */
case FSP_PKT_TYPE_NORMAL:
@@ -711,8 +812,6 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
input_sync(dev);
- fsp_packet_debug(packet);
-
return PSMOUSE_FULL_PACKET;
}
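Because the pad advertises INPUT_PROP_SEMI_MT, the two slots describe a bounding pair of contacts rather than individually tracked fingers. As a rough sketch (the exact emission of the tracking id is handled inside input_mt_report_slot_state()), a call like fsp_set_slot(dev, 0, true, x, y) for a newly active contact produces an event sequence along the lines of:

	/* EV_ABS ABS_MT_SLOT         0        */
	/* EV_ABS ABS_MT_TRACKING_ID  <new id> */
	/* EV_ABS ABS_MT_POSITION_X   x        */
	/* EV_ABS ABS_MT_POSITION_Y   y        */
	/* ...then the single-touch ABS_X/ABS_Y and BTN_* reports,
	   followed by the EV_SYN from input_sync() */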
@@ -736,42 +835,106 @@ static int fsp_activate_protocol(struct psmouse *psmouse)
ps2_command(ps2dev, param, PSMOUSE_CMD_GETID);
if (param[0] != 0x04) {
- dev_err(&psmouse->ps2dev.serio->dev,
- "Unable to enable 4 bytes packet format.\n");
+ psmouse_err(psmouse,
+ "Unable to enable 4 bytes packet format.\n");
return -EIO;
}
- if (fsp_reg_read(psmouse, FSP_REG_SYSCTL5, &val)) {
- dev_err(&psmouse->ps2dev.serio->dev,
- "Unable to read SYSCTL5 register.\n");
- return -EIO;
- }
+ if (pad->ver < FSP_VER_STL3888_C0) {
+ /* Preparing relative coordinates output for older hardware */
+ if (fsp_reg_read(psmouse, FSP_REG_SYSCTL5, &val)) {
+ psmouse_err(psmouse,
+ "Unable to read SYSCTL5 register.\n");
+ return -EIO;
+ }
- val &= ~(FSP_BIT_EN_MSID7 | FSP_BIT_EN_MSID8 | FSP_BIT_EN_AUTO_MSID8);
- /* Ensure we are not in absolute mode */
- val &= ~FSP_BIT_EN_PKT_G0;
- if (pad->buttons == 0x06) {
- /* Left/Middle/Right & Scroll Up/Down/Right/Left */
- val |= FSP_BIT_EN_MSID6;
- }
+ if (fsp_get_buttons(psmouse, &pad->buttons)) {
+ psmouse_err(psmouse,
+ "Unable to retrieve number of buttons.\n");
+ return -EIO;
+ }
- if (fsp_reg_write(psmouse, FSP_REG_SYSCTL5, val)) {
- dev_err(&psmouse->ps2dev.serio->dev,
- "Unable to set up required mode bits.\n");
- return -EIO;
+ val &= ~(FSP_BIT_EN_MSID7 | FSP_BIT_EN_MSID8 | FSP_BIT_EN_AUTO_MSID8);
+ /* Ensure we are not in absolute mode */
+ val &= ~FSP_BIT_EN_PKT_G0;
+ if (pad->buttons == 0x06) {
+ /* Left/Middle/Right & Scroll Up/Down/Right/Left */
+ val |= FSP_BIT_EN_MSID6;
+ }
+
+ if (fsp_reg_write(psmouse, FSP_REG_SYSCTL5, val)) {
+ psmouse_err(psmouse,
+ "Unable to set up required mode bits.\n");
+ return -EIO;
+ }
+
+ /*
+ * Enable OPC tags such that driver can tell the difference
+ * between on-pad and real button click
+ */
+ if (fsp_opc_tag_enable(psmouse, true))
+ psmouse_warn(psmouse,
+ "Failed to enable OPC tag mode.\n");
+ /* enable on-pad click by default */
+ pad->flags |= FSPDRV_FLAG_EN_OPC;
+
+ /* Enable on-pad vertical and horizontal scrolling */
+ fsp_onpad_vscr(psmouse, true);
+ fsp_onpad_hscr(psmouse, true);
+ } else {
+ /* Enable absolute coordinates output for Cx/Dx hardware */
+ if (fsp_reg_write(psmouse, FSP_REG_SWC1,
+ FSP_BIT_SWC1_EN_ABS_1F |
+ FSP_BIT_SWC1_EN_ABS_2F |
+ FSP_BIT_SWC1_EN_FUP_OUT |
+ FSP_BIT_SWC1_EN_ABS_CON)) {
+ psmouse_err(psmouse,
+ "Unable to enable absolute coordinates output.\n");
+ return -EIO;
+ }
}
- /*
- * Enable OPC tags such that driver can tell the difference between
- * on-pad and real button click
- */
- if (fsp_opc_tag_enable(psmouse, true))
- dev_warn(&psmouse->ps2dev.serio->dev,
- "Failed to enable OPC tag mode.\n");
+ return 0;
+}
- /* Enable on-pad vertical and horizontal scrolling */
- fsp_onpad_vscr(psmouse, true);
- fsp_onpad_hscr(psmouse, true);
+static int fsp_set_input_params(struct psmouse *psmouse)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct fsp_data *pad = psmouse->private;
+
+ if (pad->ver < FSP_VER_STL3888_C0) {
+ __set_bit(BTN_MIDDLE, dev->keybit);
+ __set_bit(BTN_BACK, dev->keybit);
+ __set_bit(BTN_FORWARD, dev->keybit);
+ __set_bit(REL_WHEEL, dev->relbit);
+ __set_bit(REL_HWHEEL, dev->relbit);
+ } else {
+ /*
+ * Hardware prior to Cx performs much better in relative mode; hence
+ * absolute coordinate and multi-touch output are only enabled for the
+ * newer hardware.
+ *
+ * The maximum coordinates can be computed as:
+ *
+ * number of scanlines * 64 - 57
+ *
+ * where the number of X/Y scanlines is 16/12.
+ */
+ int abs_x = 967, abs_y = 711;
+
+ __set_bit(EV_ABS, dev->evbit);
+ __clear_bit(EV_REL, dev->evbit);
+ __set_bit(BTN_TOUCH, dev->keybit);
+ __set_bit(BTN_TOOL_FINGER, dev->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
+ __set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
+
+ input_set_abs_params(dev, ABS_X, 0, abs_x, 0, 0);
+ input_set_abs_params(dev, ABS_Y, 0, abs_y, 0, 0);
+ input_mt_init_slots(dev, 2);
+ input_set_abs_params(dev, ABS_MT_POSITION_X, 0, abs_x, 0, 0);
+ input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, abs_y, 0, 0);
+ }
return 0;
}
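The abs_x/abs_y limits above are just the quoted formula evaluated for the 16 X and 12 Y scanlines:

	/* abs_x = 16 * 64 - 57 = 1024 - 57 = 967 */
	/* abs_y = 12 * 64 - 57 =  768 - 57 = 711 */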
@@ -829,18 +992,16 @@ static int fsp_reconnect(struct psmouse *psmouse)
int fsp_init(struct psmouse *psmouse)
{
struct fsp_data *priv;
- int ver, rev, buttons;
+ int ver, rev;
int error;
if (fsp_get_version(psmouse, &ver) ||
- fsp_get_revision(psmouse, &rev) ||
- fsp_get_buttons(psmouse, &buttons)) {
+ fsp_get_revision(psmouse, &rev)) {
return -ENODEV;
}
- psmouse_info(psmouse,
- "Finger Sensing Pad, hw: %d.%d.%d, sw: %s, buttons: %d\n",
- ver >> 4, ver & 0x0F, rev, fsp_drv_ver, buttons & 7);
+ psmouse_info(psmouse, "Finger Sensing Pad, hw: %d.%d.%d, sw: %s\n",
+ ver >> 4, ver & 0x0F, rev, fsp_drv_ver);
psmouse->private = priv = kzalloc(sizeof(struct fsp_data), GFP_KERNEL);
if (!priv)
@@ -848,17 +1009,6 @@ int fsp_init(struct psmouse *psmouse)
priv->ver = ver;
priv->rev = rev;
- priv->buttons = buttons;
-
- /* enable on-pad click by default */
- priv->flags |= FSPDRV_FLAG_EN_OPC;
-
- /* Set up various supported input event bits */
- __set_bit(BTN_MIDDLE, psmouse->dev->keybit);
- __set_bit(BTN_BACK, psmouse->dev->keybit);
- __set_bit(BTN_FORWARD, psmouse->dev->keybit);
- __set_bit(REL_WHEEL, psmouse->dev->relbit);
- __set_bit(REL_HWHEEL, psmouse->dev->relbit);
psmouse->protocol_handler = fsp_process_byte;
psmouse->disconnect = fsp_disconnect;
@@ -866,16 +1016,20 @@ int fsp_init(struct psmouse *psmouse)
psmouse->cleanup = fsp_reset;
psmouse->pktsize = 4;
- /* set default packet output based on number of buttons we found */
error = fsp_activate_protocol(psmouse);
if (error)
goto err_out;
+ /* Set up various supported input event bits */
+ error = fsp_set_input_params(psmouse);
+ if (error)
+ goto err_out;
+
error = sysfs_create_group(&psmouse->ps2dev.serio->dev.kobj,
&fsp_attribute_group);
if (error) {
- dev_err(&psmouse->ps2dev.serio->dev,
- "Failed to create sysfs attributes (%d)", error);
+ psmouse_err(psmouse,
+ "Failed to create sysfs attributes (%d)", error);
goto err_out;
}
diff --git a/drivers/input/mouse/sentelic.h b/drivers/input/mouse/sentelic.h
index 2e4af24f8c15..334de19e5ddb 100644
--- a/drivers/input/mouse/sentelic.h
+++ b/drivers/input/mouse/sentelic.h
@@ -2,7 +2,7 @@
* Finger Sensing Pad PS/2 mouse driver.
*
* Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2012 Tai-hwa Liang, Sentelic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -55,6 +55,16 @@
#define FSP_BIT_FIX_HSCR BIT(5)
#define FSP_BIT_DRAG_LOCK BIT(6)
+#define FSP_REG_SWC1 (0x90)
+#define FSP_BIT_SWC1_EN_ABS_1F BIT(0)
+#define FSP_BIT_SWC1_EN_GID BIT(1)
+#define FSP_BIT_SWC1_EN_ABS_2F BIT(2)
+#define FSP_BIT_SWC1_EN_FUP_OUT BIT(3)
+#define FSP_BIT_SWC1_EN_ABS_CON BIT(4)
+#define FSP_BIT_SWC1_GST_GRP0 BIT(5)
+#define FSP_BIT_SWC1_GST_GRP1 BIT(6)
+#define FSP_BIT_SWC1_BX_COMPAT BIT(7)
+
/* Finger-sensing Pad packet formating related definitions */
/* absolute packet type */
@@ -64,12 +74,32 @@
#define FSP_PKT_TYPE_NORMAL_OPC (0x03)
#define FSP_PKT_TYPE_SHIFT (6)
+/* bit definitions for the first byte of report packet */
+#define FSP_PB0_LBTN BIT(0)
+#define FSP_PB0_RBTN BIT(1)
+#define FSP_PB0_MBTN BIT(2)
+#define FSP_PB0_MFMC_FGR2 FSP_PB0_MBTN
+#define FSP_PB0_MUST_SET BIT(3)
+#define FSP_PB0_PHY_BTN BIT(4)
+#define FSP_PB0_MFMC BIT(5)
+
+/* hardware revisions */
+#define FSP_VER_STL3888_A4 (0xC1)
+#define FSP_VER_STL3888_B0 (0xD0)
+#define FSP_VER_STL3888_B1 (0xD1)
+#define FSP_VER_STL3888_B2 (0xD2)
+#define FSP_VER_STL3888_C0 (0xE0)
+#define FSP_VER_STL3888_C1 (0xE1)
+#define FSP_VER_STL3888_D0 (0xE2)
+#define FSP_VER_STL3888_D1 (0xE3)
+#define FSP_VER_STL3888_E0 (0xE4)
+
#ifdef __KERNEL__
struct fsp_data {
unsigned char ver; /* hardware version */
unsigned char rev; /* hardware revison */
- unsigned char buttons; /* Number of buttons */
+ unsigned int buttons; /* Number of buttons */
unsigned int flags;
#define FSPDRV_FLAG_EN_OPC (0x001) /* enable on-pad clicking */
@@ -78,6 +108,7 @@ struct fsp_data {
unsigned char last_reg; /* Last register we requested read from */
unsigned char last_val;
+ unsigned int last_mt_fgr; /* Last seen finger(multitouch) */
};
#ifdef CONFIG_MOUSE_PS2_SENTELIC
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index bd5b10eeeb40..f5fbdf94de3b 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -184,7 +184,7 @@ module_init(ams_delta_serio_init);
static void __exit ams_delta_serio_exit(void)
{
serio_unregister_port(ams_delta_serio);
- free_irq(OMAP_GPIO_IRQ(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0);
+ free_irq(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0);
gpio_free_array(ams_delta_gpios,
ARRAY_SIZE(ams_delta_gpios));
}
diff --git a/drivers/input/tablet/Kconfig b/drivers/input/tablet/Kconfig
index e53f4081a586..bed7cbf84cfd 100644
--- a/drivers/input/tablet/Kconfig
+++ b/drivers/input/tablet/Kconfig
@@ -76,6 +76,7 @@ config TABLET_USB_KBTAB
config TABLET_USB_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on USB_ARCH_HAS_HCD
+ select POWER_SUPPLY
select USB
select NEW_LEDS
select LEDS_CLASS
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index 0783864a7dc2..b4842d0e61dd 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -88,6 +88,7 @@
#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/usb/input.h>
+#include <linux/power_supply.h>
#include <asm/unaligned.h>
/*
@@ -112,6 +113,7 @@ struct wacom {
struct urb *irq;
struct wacom_wac wacom_wac;
struct mutex lock;
+ struct work_struct work;
bool open;
char phys[32];
struct wacom_led {
@@ -120,8 +122,15 @@ struct wacom {
u8 hlv; /* status led brightness button pressed (1..127) */
u8 img_lum; /* OLED matrix display brightness */
} led;
+ struct power_supply battery;
};
+static inline void wacom_schedule_work(struct wacom_wac *wacom_wac)
+{
+ struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
+ schedule_work(&wacom->work);
+}
+
extern const struct usb_device_id wacom_ids[];
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index ca28066dc81e..0d269212931e 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -167,6 +167,19 @@ static void wacom_close(struct input_dev *dev)
usb_autopm_put_interface(wacom->intf);
}
+/*
+ * Static values for the max X/Y and the resolution of the Pen interface are
+ * stored in features, so the physical size of the active area can be
+ * computed from them. This is useful when Pen and Touch share the same
+ * active area: for the Touch device we then only need the max X/Y values
+ * to have enough information to compute the touch resolution.
+ */
+static void wacom_set_phy_from_res(struct wacom_features *features)
+{
+ features->x_phy = (features->x_max * 100) / features->x_resolution;
+ features->y_phy = (features->y_max * 100) / features->y_resolution;
+}
+
static int wacom_parse_logical_collection(unsigned char *report,
struct wacom_features *features)
{
@@ -178,15 +191,7 @@ static int wacom_parse_logical_collection(unsigned char *report,
features->pktlen = WACOM_PKGLEN_BBTOUCH3;
features->device_type = BTN_TOOL_FINGER;
- /*
- * Stylus and Touch have same active area
- * so compute physical size based on stylus
- * data before its overwritten.
- */
- features->x_phy =
- (features->x_max * 100) / features->x_resolution;
- features->y_phy =
- (features->y_max * 100) / features->y_resolution;
+ wacom_set_phy_from_res(features);
features->x_max = features->y_max =
get_unaligned_le16(&report[10]);
@@ -422,6 +427,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
report_id, rep_data, 4, 1);
} while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
} else if (features->type != TABLETPC &&
+ features->type != WIRELESS &&
features->device_type == BTN_TOOL_PEN) {
do {
rep_data[0] = 2;
@@ -454,6 +460,21 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
features->pressure_fuzz = 0;
features->distance_fuzz = 0;
+ /*
+ * The wireless device's HID descriptor is basic and its layout conflicts
+ * with other tablets (the monitor and touch interfaces can look like a
+ * pen). Skip the HID query for this type and set the defaults based on
+ * the interface number instead.
+ */
+ if (features->type == WIRELESS) {
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 0) {
+ features->device_type = 0;
+ } else if (intf->cur_altsetting->desc.bInterfaceNumber == 2) {
+ features->device_type = BTN_TOOL_DOUBLETAP;
+ features->pktlen = WACOM_PKGLEN_BBTOUCH3;
+ }
+ }
+
/* only Tablet PCs and Bamboo P&T need to retrieve the info */
if ((features->type != TABLETPC) && (features->type != TABLETPC2FG) &&
(features->type != BAMBOO_PT))
@@ -822,6 +843,152 @@ static void wacom_destroy_leds(struct wacom *wacom)
}
}
+static enum power_supply_property wacom_battery_props[] = {
+ POWER_SUPPLY_PROP_CAPACITY
+};
+
+static int wacom_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wacom *wacom = container_of(psy, struct wacom, battery);
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval =
+ wacom->wacom_wac.battery_capacity * 100 / 31;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int wacom_initialize_battery(struct wacom *wacom)
+{
+ int error = 0;
+
+ if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_MONITOR) {
+ wacom->battery.properties = wacom_battery_props;
+ wacom->battery.num_properties = ARRAY_SIZE(wacom_battery_props);
+ wacom->battery.get_property = wacom_battery_get_property;
+ wacom->battery.name = "wacom_battery";
+ wacom->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ wacom->battery.use_for_apm = 0;
+
+ error = power_supply_register(&wacom->usbdev->dev,
+ &wacom->battery);
+ }
+
+ return error;
+}
+
+static void wacom_destroy_battery(struct wacom *wacom)
+{
+ if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_MONITOR)
+ power_supply_unregister(&wacom->battery);
+}
+
+static int wacom_register_input(struct wacom *wacom)
+{
+ struct input_dev *input_dev;
+ struct usb_interface *intf = wacom->intf;
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+ int error;
+
+ input_dev = input_allocate_device();
+ if (!input_dev)
+ return -ENOMEM;
+
+ input_dev->name = wacom_wac->name;
+ input_dev->dev.parent = &intf->dev;
+ input_dev->open = wacom_open;
+ input_dev->close = wacom_close;
+ usb_to_input_id(dev, &input_dev->id);
+ input_set_drvdata(input_dev, wacom);
+
+ wacom_wac->input = input_dev;
+ wacom_setup_input_capabilities(input_dev, wacom_wac);
+
+ error = input_register_device(input_dev);
+ if (error) {
+ input_free_device(input_dev);
+ wacom_wac->input = NULL;
+ }
+
+ return error;
+}
+
+static void wacom_wireless_work(struct work_struct *work)
+{
+ struct wacom *wacom = container_of(work, struct wacom, work);
+ struct usb_device *usbdev = wacom->usbdev;
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+ /*
+ * Regardless of whether this is a disconnect or a new tablet,
+ * remove any existing input devices first.
+ */
+
+ /* Stylus interface */
+ wacom = usb_get_intfdata(usbdev->config->interface[1]);
+ if (wacom->wacom_wac.input)
+ input_unregister_device(wacom->wacom_wac.input);
+ wacom->wacom_wac.input = 0;
+
+ /* Touch interface */
+ wacom = usb_get_intfdata(usbdev->config->interface[2]);
+ if (wacom->wacom_wac.input)
+ input_unregister_device(wacom->wacom_wac.input);
+ wacom->wacom_wac.input = 0;
+
+ if (wacom_wac->pid == 0) {
+ printk(KERN_INFO "wacom: wireless tablet disconnected\n");
+ } else {
+ const struct usb_device_id *id = wacom_ids;
+
+ printk(KERN_INFO
+ "wacom: wireless tablet connected with PID %x\n",
+ wacom_wac->pid);
+
+ while (id->match_flags) {
+ if (id->idVendor == USB_VENDOR_ID_WACOM &&
+ id->idProduct == wacom_wac->pid)
+ break;
+ id++;
+ }
+
+ if (!id->match_flags) {
+ printk(KERN_INFO
+ "wacom: ignorning unknown PID.\n");
+ return;
+ }
+
+ /* Stylus interface */
+ wacom = usb_get_intfdata(usbdev->config->interface[1]);
+ wacom_wac = &wacom->wacom_wac;
+ wacom_wac->features =
+ *((struct wacom_features *)id->driver_info);
+ wacom_wac->features.device_type = BTN_TOOL_PEN;
+ wacom_register_input(wacom);
+
+ /* Touch interface */
+ wacom = usb_get_intfdata(usbdev->config->interface[2]);
+ wacom_wac = &wacom->wacom_wac;
+ wacom_wac->features =
+ *((struct wacom_features *)id->driver_info);
+ wacom_wac->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
+ wacom_wac->features.device_type = BTN_TOOL_FINGER;
+ wacom_set_phy_from_res(&wacom_wac->features);
+ wacom_wac->features.x_max = wacom_wac->features.y_max = 4096;
+ wacom_register_input(wacom);
+ }
+}
+
static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
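Two details of the block above are easy to miss: wacom_wireless_work() only ever runs via the schedule_work() call in wacom_wireless_irq() (added later in this patch) on the always-armed monitor interface, and the capacity reported through the power_supply class is the receiver's raw battery value scaled against a maximum of 31, e.g.:

	/* raw = 31  ->  31 * 100 / 31 = 100 (%)                     */
	/* raw = 16  ->  16 * 100 / 31 =  51 (%)  (integer division) */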
@@ -829,18 +996,14 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
struct wacom *wacom;
struct wacom_wac *wacom_wac;
struct wacom_features *features;
- struct input_dev *input_dev;
int error;
if (!id->driver_info)
return -EINVAL;
wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!wacom || !input_dev) {
- error = -ENOMEM;
- goto fail1;
- }
+ if (!wacom)
+ return -ENOMEM;
wacom_wac = &wacom->wacom_wac;
wacom_wac->features = *((struct wacom_features *)id->driver_info);
@@ -866,11 +1029,10 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
wacom->usbdev = dev;
wacom->intf = intf;
mutex_init(&wacom->lock);
+ INIT_WORK(&wacom->work, wacom_wireless_work);
usb_make_path(dev, wacom->phys, sizeof(wacom->phys));
strlcat(wacom->phys, "/input0", sizeof(wacom->phys));
- wacom_wac->input = input_dev;
-
endpoint = &intf->cur_altsetting->endpoint[0].desc;
/* Retrieve the physical and logical size for OEM devices */
@@ -894,15 +1056,6 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
goto fail3;
}
- input_dev->name = wacom_wac->name;
- input_dev->dev.parent = &intf->dev;
- input_dev->open = wacom_open;
- input_dev->close = wacom_close;
- usb_to_input_id(dev, &input_dev->id);
- input_set_drvdata(input_dev, wacom);
-
- wacom_setup_input_capabilities(input_dev, wacom_wac);
-
usb_fill_int_urb(wacom->irq, dev,
usb_rcvintpipe(dev, endpoint->bEndpointAddress),
wacom_wac->data, features->pktlen,
@@ -914,22 +1067,34 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
if (error)
goto fail4;
- error = input_register_device(input_dev);
+ error = wacom_initialize_battery(wacom);
if (error)
goto fail5;
+ if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) {
+ error = wacom_register_input(wacom);
+ if (error)
+ goto fail6;
+ }
+
/* Note that if query fails it is not a hard failure */
wacom_query_tablet_data(intf, features);
usb_set_intfdata(intf, wacom);
+
+ if (features->quirks & WACOM_QUIRK_MONITOR) {
+ if (usb_submit_urb(wacom->irq, GFP_KERNEL))
+ goto fail5;
+ }
+
return 0;
+ fail6: wacom_destroy_battery(wacom);
fail5: wacom_destroy_leds(wacom);
fail4: wacom_remove_shared_data(wacom_wac);
fail3: usb_free_urb(wacom->irq);
fail2: usb_free_coherent(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
- fail1: input_free_device(input_dev);
- kfree(wacom);
+ fail1: kfree(wacom);
return error;
}
@@ -940,7 +1105,10 @@ static void wacom_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
usb_kill_urb(wacom->irq);
- input_unregister_device(wacom->wacom_wac.input);
+ cancel_work_sync(&wacom->work);
+ if (wacom->wacom_wac.input)
+ input_unregister_device(wacom->wacom_wac.input);
+ wacom_destroy_battery(wacom);
wacom_destroy_leds(wacom);
usb_free_urb(wacom->irq);
usb_free_coherent(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
@@ -972,7 +1140,8 @@ static int wacom_resume(struct usb_interface *intf)
wacom_query_tablet_data(intf, features);
wacom_led_control(wacom);
- if (wacom->open && usb_submit_urb(wacom->irq, GFP_NOIO) < 0)
+ if ((wacom->open || features->quirks & WACOM_QUIRK_MONITOR)
+ && usb_submit_urb(wacom->irq, GFP_NOIO) < 0)
rv = -EIO;
mutex_unlock(&wacom->lock);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 89a96427faa0..cecd35c8f0b3 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1044,6 +1044,35 @@ static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
return 0;
}
+static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len)
+{
+ unsigned char *data = wacom->data;
+ int connected;
+
+ if (len != WACOM_PKGLEN_WIRELESS || data[0] != 0x80)
+ return 0;
+
+ connected = data[1] & 0x01;
+ if (connected) {
+ int pid, battery;
+
+ pid = get_unaligned_be16(&data[6]);
+ battery = data[5] & 0x3f;
+ if (wacom->pid != pid) {
+ wacom->pid = pid;
+ wacom_schedule_work(wacom);
+ }
+ wacom->battery_capacity = battery;
+ } else if (wacom->pid != 0) {
+ /* disconnected while previously connected */
+ wacom->pid = 0;
+ wacom_schedule_work(wacom);
+ wacom->battery_capacity = 0;
+ }
+
+ return 0;
+}
+
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
{
bool sync;
@@ -1094,6 +1123,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
sync = wacom_bpt_irq(wacom_wac, len);
break;
+ case WIRELESS:
+ sync = wacom_wireless_irq(wacom_wac, len);
+ break;
+
default:
sync = false;
break;
@@ -1155,7 +1188,7 @@ void wacom_setup_device_quirks(struct wacom_features *features)
/* these device have multiple inputs */
if (features->type == TABLETPC || features->type == TABLETPC2FG ||
- features->type == BAMBOO_PT)
+ features->type == BAMBOO_PT || features->type == WIRELESS)
features->quirks |= WACOM_QUIRK_MULTI_INPUT;
/* quirk for bamboo touch with 2 low res touches */
@@ -1167,6 +1200,16 @@ void wacom_setup_device_quirks(struct wacom_features *features)
features->y_fuzz <<= 5;
features->quirks |= WACOM_QUIRK_BBTOUCH_LOWRES;
}
+
+ if (features->type == WIRELESS) {
+
+ /* monitor never has input and pen/touch have delayed create */
+ features->quirks |= WACOM_QUIRK_NO_INPUT;
+
+ /* must be monitor interface if no device_type set */
+ if (!features->device_type)
+ features->quirks |= WACOM_QUIRK_MONITOR;
+ }
}
static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
@@ -1640,6 +1683,9 @@ static const struct wacom_features wacom_features_0xEC =
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x84 =
+ { "Wacom Wireless Receiver", WACOM_PKGLEN_WIRELESS, 0, 0, 0,
+ 0, WIRELESS, 0, 0 };
static const struct wacom_features wacom_features_0xD0 =
{ "Wacom Bamboo 2FG", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1766,6 +1812,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_DETAILED(0xCE, USB_CLASS_HID,
USB_INTERFACE_SUBCLASS_BOOT,
USB_INTERFACE_PROTOCOL_MOUSE) },
+ { USB_DEVICE_WACOM(0x84) },
{ USB_DEVICE_WACOM(0xD0) },
{ USB_DEVICE_WACOM(0xD1) },
{ USB_DEVICE_WACOM(0xD2) },
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 4f0ba21b0196..ba5a334e54d6 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -24,6 +24,7 @@
#define WACOM_PKGLEN_BBTOUCH 20
#define WACOM_PKGLEN_BBTOUCH3 64
#define WACOM_PKGLEN_BBPEN 10
+#define WACOM_PKGLEN_WIRELESS 32
/* device IDs */
#define STYLUS_DEVICE_ID 0x02
@@ -45,6 +46,8 @@
/* device quirks */
#define WACOM_QUIRK_MULTI_INPUT 0x0001
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0002
+#define WACOM_QUIRK_NO_INPUT 0x0004
+#define WACOM_QUIRK_MONITOR 0x0008
enum {
PENPARTNER = 0,
@@ -54,6 +57,7 @@ enum {
PL,
DTU,
BAMBOO_PT,
+ WIRELESS,
INTUOS,
INTUOS3S,
INTUOS3,
@@ -107,6 +111,8 @@ struct wacom_wac {
struct wacom_features features;
struct wacom_shared *shared;
struct input_dev *input;
+ int pid;
+ int battery_capacity;
};
#endif
diff --git a/drivers/media/video/davinci/vpbe_osd.c b/drivers/media/video/davinci/vpbe_osd.c
index d6488b79ae3b..bba299dbf396 100644
--- a/drivers/media/video/davinci/vpbe_osd.c
+++ b/drivers/media/video/davinci/vpbe_osd.c
@@ -28,7 +28,6 @@
#include <linux/clk.h>
#include <linux/slab.h>
-#include <mach/io.h>
#include <mach/cputype.h>
#include <mach/hardware.h>
diff --git a/drivers/media/video/davinci/vpbe_venc.c b/drivers/media/video/davinci/vpbe_venc.c
index 00e80f59d5d5..b21ecc8d134d 100644
--- a/drivers/media/video/davinci/vpbe_venc.c
+++ b/drivers/media/video/davinci/vpbe_venc.c
@@ -27,7 +27,6 @@
#include <mach/hardware.h>
#include <mach/mux.h>
-#include <mach/io.h>
#include <mach/i2c.h>
#include <linux/io.h>
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 74522773e934..93c35ef5f0ad 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -286,7 +286,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0);
sg_dma_len(sg) = new_size;
- txd = ichan->dma_chan.device->device_prep_slave_sg(
+ txd = dmaengine_prep_slave_sg(
&ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!txd)
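This and the following conversions swap direct calls through chan->device->device_prep_slave_sg for the dmaengine_prep_slave_sg() helper. The helper is essentially a thin inline in <linux/dmaengine.h>; a simplified sketch of its shape (the exact signature varies slightly across kernel versions):

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	/* dispatch to the DMA controller driver's slave-sg prep hook */
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags);
}

Going through the wrapper keeps client drivers insulated from future changes to the raw device_prep_slave_sg callback.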
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index 4ed1c7c28ae7..02194c056b00 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -564,7 +564,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
spin_unlock_irq(&fh->queue_lock);
- desc = fh->chan->device->device_prep_slave_sg(fh->chan,
+ desc = dmaengine_prep_slave_sg(fh->chan,
buf->sg, sg_elems, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!desc) {
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 390863e7efbd..9819dc09ce08 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -24,6 +24,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/types.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>
@@ -173,6 +174,7 @@ struct atmel_mci {
struct atmel_mci_dma dma;
struct dma_chan *data_chan;
+ struct dma_slave_config dma_conf;
u32 cmd_status;
u32 data_status;
@@ -863,16 +865,17 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
if (data->flags & MMC_DATA_READ) {
direction = DMA_FROM_DEVICE;
- slave_dirn = DMA_DEV_TO_MEM;
+ host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
} else {
direction = DMA_TO_DEVICE;
- slave_dirn = DMA_MEM_TO_DEV;
+ host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
}
sglen = dma_map_sg(chan->device->dev, data->sg,
data->sg_len, direction);
- desc = chan->device->device_prep_slave_sg(chan,
+ dmaengine_slave_config(chan, &host->dma_conf);
+ desc = dmaengine_prep_slave_sg(chan,
data->sg, sglen, slave_dirn,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
@@ -1960,10 +1963,6 @@ static bool atmci_configure_dma(struct atmel_mci *host)
if (pdata && find_slave_dev(pdata->dma_slave)) {
dma_cap_mask_t mask;
- setup_dma_addr(pdata->dma_slave,
- host->mapbase + ATMCI_TDR,
- host->mapbase + ATMCI_RDR);
-
/* Try to grab a DMA channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -1977,6 +1976,14 @@ static bool atmci_configure_dma(struct atmel_mci *host)
dev_info(&host->pdev->dev,
"using %s for DMA transfers\n",
dma_chan_name(host->dma.chan));
+
+ host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
+ host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_conf.src_maxburst = 1;
+ host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
+ host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_conf.dst_maxburst = 1;
+ host->dma_conf.device_fc = false;
return true;
}
}
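Instead of pushing the FIFO addresses through platform data, atmel-mci now describes them in a dma_slave_config and programs the channel with dmaengine_slave_config() before preparing each transfer. A minimal sketch of the pattern, with illustrative names (phys_base, RX_REG_OFS, TX_REG_OFS and chan are placeholders, not the driver's identifiers):

struct dma_slave_config conf = {
	.direction      = DMA_DEV_TO_MEM,             /* overwritten per transfer */
	.src_addr       = phys_base + RX_REG_OFS,     /* peripheral RX data register */
	.dst_addr       = phys_base + TX_REG_OFS,     /* peripheral TX data register */
	.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	.src_maxburst   = 1,
	.dst_maxburst   = 1,
	.device_fc      = false,                      /* DMA controller handles flow control */
};

dmaengine_slave_config(chan, &conf);                  /* program the channel */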
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 983e244eca76..032b84791a16 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -30,6 +30,7 @@
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
+#include <linux/types.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -400,6 +401,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ .device_fc = false,
};
struct dma_chan *chan;
struct dma_device *device;
@@ -441,7 +443,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
return -EINVAL;
dmaengine_slave_config(chan, &conf);
- desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+ desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
conf.direction, DMA_CTRL_ACK);
if (!desc)
goto unmap_exit;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 4184b7946bbf..b2058b432320 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -33,6 +33,7 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
+#include <linux/types.h>
#include <asm/dma.h>
#include <asm/irq.h>
@@ -254,7 +255,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
if (nents != data->sg_len)
return -EINVAL;
- host->desc = host->dma->device->device_prep_slave_sg(host->dma,
+ host->desc = dmaengine_prep_slave_sg(host->dma,
data->sg, data->sg_len, slave_dirn,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -267,6 +268,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
wmb();
dmaengine_submit(host->desc);
+ dma_async_issue_pending(host->dma);
return 0;
}
@@ -710,6 +712,7 @@ static int mxcmci_setup_dma(struct mmc_host *mmc)
config->src_addr_width = 4;
config->dst_maxburst = host->burstlen;
config->src_maxburst = host->burstlen;
+ config->device_fc = false;
return dmaengine_slave_config(host->dma, config);
}
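The added dma_async_issue_pending() call matters because dmaengine_submit() only places the descriptor on the channel's pending queue; the hardware is not started until that queue is kicked. A minimal usage sketch (my_dma_done and host are illustrative names):

dma_cookie_t cookie;

desc->callback       = my_dma_done;     /* illustrative completion handler */
desc->callback_param = host;

cookie = dmaengine_submit(desc);        /* queue the descriptor only */
if (dma_submit_error(cookie))
	return -EIO;

dma_async_issue_pending(chan);          /* actually start the transfer */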
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 382c835d217c..b0f2ef988188 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -38,10 +38,10 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
+#include <linux/fsl/mxs-dma.h>
#include <mach/mxs.h>
#include <mach/common.h>
-#include <mach/dma.h>
#include <mach/mmc.h>
#define DRIVER_NAME "mxs-mmc"
@@ -305,7 +305,7 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
}
static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
- struct mxs_mmc_host *host, unsigned int append)
+ struct mxs_mmc_host *host, unsigned long flags)
{
struct dma_async_tx_descriptor *desc;
struct mmc_data *data = host->data;
@@ -324,8 +324,8 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
sg_len = SSP_PIO_NUM;
}
- desc = host->dmach->device->device_prep_slave_sg(host->dmach,
- sgl, sg_len, host->slave_dirn, append);
+ desc = dmaengine_prep_slave_sg(host->dmach,
+ sgl, sg_len, host->slave_dirn, flags);
if (desc) {
desc->callback = mxs_mmc_dma_irq_callback;
desc->callback_param = host;
@@ -358,7 +358,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
host->ssp_pio_words[2] = cmd1;
host->dma_dir = DMA_NONE;
host->slave_dirn = DMA_TRANS_NONE;
- desc = mxs_mmc_prep_dma(host, 0);
+ desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
if (!desc)
goto out;
@@ -398,7 +398,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
host->ssp_pio_words[2] = cmd1;
host->dma_dir = DMA_NONE;
host->slave_dirn = DMA_TRANS_NONE;
- desc = mxs_mmc_prep_dma(host, 0);
+ desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
if (!desc)
goto out;
@@ -526,7 +526,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
host->data = data;
host->dma_dir = dma_data_dir;
host->slave_dirn = slave_dirn;
- desc = mxs_mmc_prep_dma(host, 1);
+ desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
goto out;
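The mxs-mmc hunks above also stop passing a bare integer where the dmaengine API expects its standard control flags. For reference, the two flags used here come from enum dma_ctrl_flags in <linux/dmaengine.h> (abridged excerpt; consult the header of the running kernel for the authoritative definition):

enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),	/* invoke the completion callback for this descriptor */
	DMA_CTRL_ACK       = (1 << 1),	/* descriptor is pre-acked and may be reused/freed */
	/* ... further flags elided ... */
};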
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 60f205708f54..aafaf0b6eb1c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -286,7 +286,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
DMA_FROM_DEVICE);
if (ret > 0) {
host->dma_active = true;
- desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+ desc = dmaengine_prep_slave_sg(chan, sg, ret,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
@@ -335,7 +335,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
DMA_TO_DEVICE);
if (ret > 0) {
host->dma_active = true;
- desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+ desc = dmaengine_prep_slave_sg(chan, sg, ret,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 8253ec12003e..fff928604859 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -88,7 +88,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
if (ret > 0)
- desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+ desc = dmaengine_prep_slave_sg(chan, sg, ret,
DMA_DEV_TO_MEM, DMA_CTRL_ACK);
if (desc) {
@@ -169,7 +169,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
if (ret > 0)
- desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+ desc = dmaengine_prep_slave_sg(chan, sg, ret,
DMA_MEM_TO_DEV, DMA_CTRL_ACK);
if (desc) {
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 284cf3433720..5760c1a4b3f6 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -304,9 +304,6 @@ config MTD_OOPS
buffer in a flash partition where it can be read back at some
later point.
- To use, add console=ttyMTDx to the kernel command line,
- where x is the MTD device number to use.
-
config MTD_SWAP
tristate "Swap on MTD device support"
depends on MTD && SWAP
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 9bcd1f415f43..dbbd2edfb812 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -87,7 +87,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
-static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
@@ -262,9 +262,9 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
static void fixup_use_point(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
- if (!mtd->point && map_is_linear(map)) {
- mtd->point = cfi_intelext_point;
- mtd->unpoint = cfi_intelext_unpoint;
+ if (!mtd->_point && map_is_linear(map)) {
+ mtd->_point = cfi_intelext_point;
+ mtd->_unpoint = cfi_intelext_unpoint;
}
}
@@ -274,8 +274,8 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
struct cfi_private *cfi = map->fldrv_priv;
if (cfi->cfiq->BufWriteTimeoutTyp) {
printk(KERN_INFO "Using buffer write method\n" );
- mtd->write = cfi_intelext_write_buffers;
- mtd->writev = cfi_intelext_writev;
+ mtd->_write = cfi_intelext_write_buffers;
+ mtd->_writev = cfi_intelext_writev;
}
}
@@ -443,15 +443,15 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
mtd->type = MTD_NORFLASH;
/* Fill in the default mtd operations */
- mtd->erase = cfi_intelext_erase_varsize;
- mtd->read = cfi_intelext_read;
- mtd->write = cfi_intelext_write_words;
- mtd->sync = cfi_intelext_sync;
- mtd->lock = cfi_intelext_lock;
- mtd->unlock = cfi_intelext_unlock;
- mtd->is_locked = cfi_intelext_is_locked;
- mtd->suspend = cfi_intelext_suspend;
- mtd->resume = cfi_intelext_resume;
+ mtd->_erase = cfi_intelext_erase_varsize;
+ mtd->_read = cfi_intelext_read;
+ mtd->_write = cfi_intelext_write_words;
+ mtd->_sync = cfi_intelext_sync;
+ mtd->_lock = cfi_intelext_lock;
+ mtd->_unlock = cfi_intelext_unlock;
+ mtd->_is_locked = cfi_intelext_is_locked;
+ mtd->_suspend = cfi_intelext_suspend;
+ mtd->_resume = cfi_intelext_resume;
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
@@ -600,12 +600,12 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
}
#ifdef CONFIG_MTD_OTP
- mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
- mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
- mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
- mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
- mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
- mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
+ mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
+ mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
+ mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
+ mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
+ mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
+ mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif
/* This function has the potential to distort the reality
@@ -1017,8 +1017,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
case FL_READY:
case FL_STATUS:
case FL_JEDEC_QUERY:
- /* We should really make set_vpp() count, rather than doing this */
- DISABLE_VPP(map);
break;
default:
printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
@@ -1324,7 +1322,7 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
int chipnum;
int ret = 0;
- if (!map->virt || (from + len > mtd->size))
+ if (!map->virt)
return -EINVAL;
/* Now lock the chip(s) to POINT state */
@@ -1334,7 +1332,6 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
ofs = from - (chipnum << cfi->chipshift);
*virt = map->virt + cfi->chips[chipnum].start + ofs;
- *retlen = 0;
if (phys)
*phys = map->phys + cfi->chips[chipnum].start + ofs;
@@ -1369,12 +1366,12 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
return 0;
}
-static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs;
- int chipnum;
+ int chipnum, err = 0;
/* Now unlock the chip(s) POINT state */
@@ -1382,7 +1379,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
- while (len) {
+ while (len && !err) {
unsigned long thislen;
struct flchip *chip;
@@ -1400,8 +1397,10 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
chip->ref_point_counter--;
if(chip->ref_point_counter == 0)
chip->state = FL_READY;
- } else
- printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
+ } else {
+ printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
+ err = -EINVAL;
+ }
put_chip(map, chip, chip->start);
mutex_unlock(&chip->mutex);
@@ -1410,6 +1409,8 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
ofs = 0;
chipnum++;
}
+
+ return err;
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
@@ -1456,8 +1457,6 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
- *retlen = 0;
-
while (len) {
unsigned long thislen;
@@ -1551,7 +1550,8 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
}
xip_enable(map, chip, adr);
- out: put_chip(map, chip, adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
@@ -1565,10 +1565,6 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
int chipnum;
unsigned long ofs;
- *retlen = 0;
- if (!len)
- return 0;
-
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
@@ -1794,7 +1790,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
}
xip_enable(map, chip, cmd_adr);
- out: put_chip(map, chip, cmd_adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, cmd_adr);
mutex_unlock(&chip->mutex);
return ret;
}
@@ -1813,7 +1810,6 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
for (i = 0; i < count; i++)
len += vecs[i].iov_len;
- *retlen = 0;
if (!len)
return 0;
@@ -1932,6 +1928,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
ret = -EIO;
} else if (chipstatus & 0x20 && retries--) {
printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
+ DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
goto retry;
@@ -1944,7 +1941,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
}
xip_enable(map, chip, adr);
- out: put_chip(map, chip, adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
@@ -2086,7 +2084,8 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
}
xip_enable(map, chip, adr);
-out: put_chip(map, chip, adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
@@ -2483,7 +2482,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
allowed to. Or should we return -EAGAIN, because the upper layers
ought to have already shut down anything which was using the device
anyway? The latter for now. */
- printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
+ printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
ret = -EAGAIN;
case FL_PM_SUSPENDED:
break;
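The cfi_cmdset_0001 changes above follow the broader MTD API rework in this series: drivers now fill in underscore-prefixed methods (mtd->_read, mtd->_write, ...) and no longer duplicate the bounds and *retlen bookkeeping, which moves into common mtd_read()/mtd_write() wrappers. A hedged sketch of the wrapper shape, not the exact mtdcore code:

/* sketch only: the in-tree helpers carry additional checks and live in
 * include/linux/mtd/mtd.h and drivers/mtd/mtdcore.c */
static int mtd_read_sketch(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_read(mtd, from, len, retlen, buf);
}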
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 8d70895a58d6..d02592e6a0f0 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -59,6 +59,9 @@ static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
+
static void cfi_amdstd_destroy(struct mtd_info *);
struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
@@ -189,7 +192,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
struct cfi_private *cfi = map->fldrv_priv;
if (cfi->cfiq->BufWriteTimeoutTyp) {
pr_debug("Using buffer write method\n" );
- mtd->write = cfi_amdstd_write_buffers;
+ mtd->_write = cfi_amdstd_write_buffers;
}
}
@@ -228,8 +231,8 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd)
static void fixup_use_secsi(struct mtd_info *mtd)
{
/* Setup for chips with a secsi area */
- mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
- mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
+ mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
+ mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}
static void fixup_use_erase_chip(struct mtd_info *mtd)
@@ -238,7 +241,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd)
struct cfi_private *cfi = map->fldrv_priv;
if ((cfi->cfiq->NumEraseRegions == 1) &&
((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
- mtd->erase = cfi_amdstd_erase_chip;
+ mtd->_erase = cfi_amdstd_erase_chip;
}
}
@@ -249,8 +252,8 @@ static void fixup_use_erase_chip(struct mtd_info *mtd)
*/
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
- mtd->lock = cfi_atmel_lock;
- mtd->unlock = cfi_atmel_unlock;
+ mtd->_lock = cfi_atmel_lock;
+ mtd->_unlock = cfi_atmel_unlock;
mtd->flags |= MTD_POWERUP_LOCK;
}
@@ -429,12 +432,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
mtd->type = MTD_NORFLASH;
/* Fill in the default mtd operations */
- mtd->erase = cfi_amdstd_erase_varsize;
- mtd->write = cfi_amdstd_write_words;
- mtd->read = cfi_amdstd_read;
- mtd->sync = cfi_amdstd_sync;
- mtd->suspend = cfi_amdstd_suspend;
- mtd->resume = cfi_amdstd_resume;
+ mtd->_erase = cfi_amdstd_erase_varsize;
+ mtd->_write = cfi_amdstd_write_words;
+ mtd->_read = cfi_amdstd_read;
+ mtd->_sync = cfi_amdstd_sync;
+ mtd->_suspend = cfi_amdstd_suspend;
+ mtd->_resume = cfi_amdstd_resume;
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
@@ -443,6 +446,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
pr_debug("MTD %s(): write buffer size %d\n", __func__,
mtd->writebufsize);
+ mtd->_panic_write = cfi_amdstd_panic_write;
mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
if (cfi->cfi_mode==CFI_MODE_CFI){
@@ -770,8 +774,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
case FL_READY:
case FL_STATUS:
- /* We should really make set_vpp() count, rather than doing this */
- DISABLE_VPP(map);
break;
default:
printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
@@ -1013,13 +1015,9 @@ static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_
int ret = 0;
/* ofs: offset within the first chip that the first read should start */
-
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
-
- *retlen = 0;
-
while (len) {
unsigned long thislen;
@@ -1097,16 +1095,11 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len,
int chipnum;
int ret = 0;
-
/* ofs: offset within the first chip that the first read should start */
-
/* 8 secsi bytes per chip */
chipnum=from>>3;
ofs=from & 7;
-
- *retlen = 0;
-
while (len) {
unsigned long thislen;
@@ -1234,6 +1227,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
op_done:
chip->state = FL_READY;
+ DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
@@ -1251,10 +1245,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
unsigned long ofs, chipstart;
DECLARE_WAITQUEUE(wait, current);
- *retlen = 0;
- if (!len)
- return 0;
-
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
chipstart = cfi->chips[chipnum].start;
@@ -1476,6 +1466,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
ret = -EIO;
op_done:
chip->state = FL_READY;
+ DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
@@ -1493,10 +1484,6 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
int chipnum;
unsigned long ofs;
- *retlen = 0;
- if (!len)
- return 0;
-
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
@@ -1562,6 +1549,238 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
return 0;
}
+/*
+ * Wait for the flash chip to become ready to write data
+ *
+ * This is only called during the panic_write() path. When panic_write()
+ * is called, the kernel is in the process of a panic, and will soon be
+ * dead. Therefore we don't take any locks, and attempt to get access
+ * to the chip as soon as possible.
+ */
+static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int retries = 10;
+ int i;
+
+ /*
+ * If the driver thinks the chip is idle, and no toggle bits
+ * are changing, then the chip is actually idle for sure.
+ */
+ if (chip->state == FL_READY && chip_ready(map, adr))
+ return 0;
+
+ /*
+ * Try several times to reset the chip and then wait for it
+ * to become idle. The upper limit of a few milliseconds of
+ * delay isn't a big problem: the kernel is dying anyway. It
+ * is more important to save the messages.
+ */
+ while (retries > 0) {
+ const unsigned long timeo = (HZ / 1000) + 1;
+
+ /* send the reset command */
+ map_write(map, CMD(0xF0), chip->start);
+
+ /* wait for the chip to become ready */
+ for (i = 0; i < jiffies_to_usecs(timeo); i++) {
+ if (chip_ready(map, adr))
+ return 0;
+
+ udelay(1);
+ }
+ }
+
+ /* the chip never became ready */
+ return -EBUSY;
+}
+
+/*
+ * Write out one word of data to a single flash chip during a kernel panic
+ *
+ * This is only called during the panic_write() path. When panic_write()
+ * is called, the kernel is in the process of a panic, and will soon be
+ * dead. Therefore we don't take any locks, and attempt to get access
+ * to the chip as soon as possible.
+ *
+ * The implementation of this routine is intentionally similar to
+ * do_write_oneword(), in order to ease code maintenance.
+ */
+static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum)
+{
+ const unsigned long uWriteTimeout = (HZ / 1000) + 1;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int retry_cnt = 0;
+ map_word oldd;
+ int ret = 0;
+ int i;
+
+ adr += chip->start;
+
+ ret = cfi_amdstd_panic_wait(map, chip, adr);
+ if (ret)
+ return ret;
+
+ pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
+ __func__, adr, datum.x[0]);
+
+ /*
+ * Check for a NOP for the case when the datum to write is already
+ * present - it saves time and works around buggy chips that corrupt
+ * data at other locations when 0xff is written to a location that
+ * already contains 0xff.
+ */
+ oldd = map_read(map, adr);
+ if (map_word_equal(map, oldd, datum)) {
+ pr_debug("MTD %s(): NOP\n", __func__);
+ goto op_done;
+ }
+
+ ENABLE_VPP(map);
+
+retry:
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ map_write(map, datum, adr);
+
+ for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
+ if (chip_ready(map, adr))
+ break;
+
+ udelay(1);
+ }
+
+ if (!chip_good(map, adr, datum)) {
+ /* reset on all failures. */
+ map_write(map, CMD(0xF0), chip->start);
+ /* FIXME - should have reset delay before continuing */
+
+ if (++retry_cnt <= MAX_WORD_RETRIES)
+ goto retry;
+
+ ret = -EIO;
+ }
+
+op_done:
+ DISABLE_VPP(map);
+ return ret;
+}
+
+/*
+ * Write out some data during a kernel panic
+ *
+ * This is used by the mtdoops driver to save the dying messages from a
+ * kernel which has panic'd.
+ *
+ * This routine ignores all of the locking used throughout the rest of the
+ * driver, in order to ensure that the data gets written out no matter what
+ * state this driver (and the flash chip itself) was in when the kernel crashed.
+ *
+ * The implementation of this routine is intentionally similar to
+ * cfi_amdstd_write_words(), in order to ease code maintenance.
+ */
+static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs, chipstart;
+ int ret = 0;
+ int chipnum;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+ chipstart = cfi->chips[chipnum].start;
+
+ /* If it's not bus aligned, do the first byte write */
+ if (ofs & (map_bankwidth(map) - 1)) {
+ unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
+ int i = ofs - bus_ofs;
+ int n = 0;
+ map_word tmp_buf;
+
+ ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
+ if (ret)
+ return ret;
+
+ /* Load 'tmp_buf' with old contents of flash */
+ tmp_buf = map_read(map, bus_ofs + chipstart);
+
+ /* Number of bytes to copy from buffer */
+ n = min_t(int, len, map_bankwidth(map) - i);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
+
+ ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+ bus_ofs, tmp_buf);
+ if (ret)
+ return ret;
+
+ ofs += n;
+ buf += n;
+ (*retlen) += n;
+ len -= n;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ /* We are now aligned, write as much as possible */
+ while (len >= map_bankwidth(map)) {
+ map_word datum;
+
+ datum = map_word_load(map, buf);
+
+ ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+ ofs, datum);
+ if (ret)
+ return ret;
+
+ ofs += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ (*retlen) += map_bankwidth(map);
+ len -= map_bankwidth(map);
+
+ if (ofs >> cfi->chipshift) {
+ chipnum++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+
+ chipstart = cfi->chips[chipnum].start;
+ }
+ }
+
+ /* Write the trailing bytes if any */
+ if (len & (map_bankwidth(map) - 1)) {
+ map_word tmp_buf;
+
+ ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
+ if (ret)
+ return ret;
+
+ tmp_buf = map_read(map, ofs + chipstart);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
+
+ ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+ ofs, tmp_buf);
+ if (ret)
+ return ret;
+
+ (*retlen) += len;
+ }
+
+ return 0;
+}
+
/*
* Handle devices with one erase region, that only implement
@@ -1649,6 +1868,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->state = FL_READY;
xip_enable(map, chip, adr);
+ DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
@@ -1739,6 +1959,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
}
chip->state = FL_READY;
+ DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
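The new cfi_amdstd_panic_write() is reached through the generic mtd_panic_write() entry point, which a panic-time logger such as mtdoops uses to flush the dying messages without taking any locks. A hedged usage sketch (offset, record and record_size are illustrative names):

size_t retlen = 0;
int err;

err = mtd_panic_write(mtd, offset, record_size, &retlen, record);
if (err || retlen != record_size)
	pr_err("panic write failed at 0x%llx\n", (unsigned long long)offset);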
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 85e80180b65b..096993f9711e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -228,15 +228,15 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
}
/* Also select the correct geometry setup too */
- mtd->erase = cfi_staa_erase_varsize;
- mtd->read = cfi_staa_read;
- mtd->write = cfi_staa_write_buffers;
- mtd->writev = cfi_staa_writev;
- mtd->sync = cfi_staa_sync;
- mtd->lock = cfi_staa_lock;
- mtd->unlock = cfi_staa_unlock;
- mtd->suspend = cfi_staa_suspend;
- mtd->resume = cfi_staa_resume;
+ mtd->_erase = cfi_staa_erase_varsize;
+ mtd->_read = cfi_staa_read;
+ mtd->_write = cfi_staa_write_buffers;
+ mtd->_writev = cfi_staa_writev;
+ mtd->_sync = cfi_staa_sync;
+ mtd->_lock = cfi_staa_lock;
+ mtd->_unlock = cfi_staa_unlock;
+ mtd->_suspend = cfi_staa_suspend;
+ mtd->_resume = cfi_staa_resume;
mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
@@ -394,8 +394,6 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
- *retlen = 0;
-
while (len) {
unsigned long thislen;
@@ -617,10 +615,6 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
int chipnum;
unsigned long ofs;
- *retlen = 0;
- if (!len)
- return 0;
-
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
@@ -904,12 +898,6 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
int i, first;
struct mtd_erase_region_info *regions = mtd->eraseregions;
- if (instr->addr > mtd->size)
- return -EINVAL;
-
- if ((instr->len + instr->addr) > mtd->size)
- return -EINVAL;
-
/* Check that both start and end of the requested erase are
* aligned with the erasesize at the appropriate addresses.
*/
@@ -1155,9 +1143,6 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (len & (mtd->erasesize -1))
return -EINVAL;
- if ((len + ofs) > mtd->size)
- return -EINVAL;
-
chipnum = ofs >> cfi->chipshift;
adr = ofs - (chipnum << cfi->chipshift);
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 8e464054a631..f992418f40a8 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -173,12 +173,6 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
int i, first;
struct mtd_erase_region_info *regions = mtd->eraseregions;
- if (ofs > mtd->size)
- return -EINVAL;
-
- if ((len + ofs) > mtd->size)
- return -EINVAL;
-
/* Check that both start and end of the requested erase are
* aligned with the erasesize at the appropriate addresses.
*/
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 89c6595454a5..800b0e853e86 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -101,7 +101,7 @@ static void fixup_use_fwh_lock(struct mtd_info *mtd)
{
printk(KERN_NOTICE "using fwh lock/unlock method\n");
/* Setup for the chips with the fwh lock method */
- mtd->lock = fwh_lock_varsize;
- mtd->unlock = fwh_unlock_varsize;
+ mtd->_lock = fwh_lock_varsize;
+ mtd->_unlock = fwh_unlock_varsize;
}
#endif /* FWH_LOCK_H */
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index f2b872946871..f7a5bca92aef 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -55,10 +55,10 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
mtd->name = map->name;
mtd->type = MTD_ABSENT;
mtd->size = map->size;
- mtd->erase = map_absent_erase;
- mtd->read = map_absent_read;
- mtd->write = map_absent_write;
- mtd->sync = map_absent_sync;
+ mtd->_erase = map_absent_erase;
+ mtd->_read = map_absent_read;
+ mtd->_write = map_absent_write;
+ mtd->_sync = map_absent_sync;
mtd->flags = 0;
mtd->erasesize = PAGE_SIZE;
mtd->writesize = 1;
@@ -70,13 +70,11 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
- *retlen = 0;
return -ENODEV;
}
static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
- *retlen = 0;
return -ENODEV;
}
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 67640ccb2d41..991c2a1c05d3 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -64,11 +64,11 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
mtd->name = map->name;
mtd->type = MTD_RAM;
mtd->size = map->size;
- mtd->erase = mapram_erase;
- mtd->get_unmapped_area = mapram_unmapped_area;
- mtd->read = mapram_read;
- mtd->write = mapram_write;
- mtd->sync = mapram_nop;
+ mtd->_erase = mapram_erase;
+ mtd->_get_unmapped_area = mapram_unmapped_area;
+ mtd->_read = mapram_read;
+ mtd->_write = mapram_write;
+ mtd->_sync = mapram_nop;
mtd->flags = MTD_CAP_RAM;
mtd->writesize = 1;
@@ -122,14 +122,10 @@ static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
unsigned long i;
allff = map_word_ff(map);
-
for (i=0; i<instr->len; i += map_bankwidth(map))
map_write(map, allff, instr->addr + i);
-
instr->state = MTD_ERASE_DONE;
-
mtd_erase_callback(instr);
-
return 0;
}
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index 593f73d480d2..47a43cf7e5c6 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -41,11 +41,11 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
mtd->name = map->name;
mtd->type = MTD_ROM;
mtd->size = map->size;
- mtd->get_unmapped_area = maprom_unmapped_area;
- mtd->read = maprom_read;
- mtd->write = maprom_write;
- mtd->sync = maprom_nop;
- mtd->erase = maprom_erase;
+ mtd->_get_unmapped_area = maprom_unmapped_area;
+ mtd->_read = maprom_read;
+ mtd->_write = maprom_write;
+ mtd->_sync = maprom_nop;
+ mtd->_erase = maprom_erase;
mtd->flags = MTD_CAP_ROM;
mtd->erasesize = map->size;
mtd->writesize = 1;
@@ -85,8 +85,7 @@ static void maprom_nop(struct mtd_info *mtd)
static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
- printk(KERN_NOTICE "maprom_write called\n");
- return -EIO;
+ return -EROFS;
}
static int maprom_erase (struct mtd_info *mtd, struct erase_info *info)
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 8d3dac40d7e6..4cdb2af7bf44 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -103,6 +103,13 @@ config M25PXX_USE_FAST_READ
help
This option enables FAST_READ access supported by ST M25Pxx.
+config MTD_SPEAR_SMI
+ tristate "SPEAR MTD NOR Support through SMI controller"
+ depends on PLAT_SPEAR
+ default y
+ help
+ This enables SNOR support on SPEAR platforms using the SMI controller.
+
config MTD_SST25L
tristate "Support SST25L (non JEDEC) SPI Flash chips"
depends on SPI_MASTER
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 56c7cd462f11..a4dd1d822b6c 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
+obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
CFLAGS_docg3.o += -I$(src)
\ No newline at end of file
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e7e46d1e7463..a4a80b742e65 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -104,14 +104,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
int offset = from & (PAGE_SIZE-1);
int cpylen;
- if (from > mtd->size)
- return -EINVAL;
- if (from + len > mtd->size)
- len = mtd->size - from;
-
- if (retlen)
- *retlen = 0;
-
while (len) {
if ((offset + len) > PAGE_SIZE)
cpylen = PAGE_SIZE - offset; // multiple pages
@@ -148,8 +140,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
int offset = to & ~PAGE_MASK; // page offset
int cpylen;
- if (retlen)
- *retlen = 0;
while (len) {
if ((offset+len) > PAGE_SIZE)
cpylen = PAGE_SIZE - offset; // multiple pages
@@ -188,13 +178,6 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
struct block2mtd_dev *dev = mtd->priv;
int err;
- if (!len)
- return 0;
- if (to >= mtd->size)
- return -ENOSPC;
- if (to + len > mtd->size)
- len = mtd->size - to;
-
mutex_lock(&dev->write_mutex);
err = _block2mtd_write(dev, buf, to, len, retlen);
mutex_unlock(&dev->write_mutex);
@@ -283,13 +266,14 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
+ dev->mtd.writebufsize = PAGE_SIZE;
dev->mtd.type = MTD_RAM;
dev->mtd.flags = MTD_CAP_RAM;
- dev->mtd.erase = block2mtd_erase;
- dev->mtd.write = block2mtd_write;
- dev->mtd.writev = mtd_writev;
- dev->mtd.sync = block2mtd_sync;
- dev->mtd.read = block2mtd_read;
+ dev->mtd._erase = block2mtd_erase;
+ dev->mtd._write = block2mtd_write;
+ dev->mtd._writev = mtd_writev;
+ dev->mtd._sync = block2mtd_sync;
+ dev->mtd._read = block2mtd_read;
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index b1cdf6479019..a4eb8b5b85ec 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -562,14 +562,15 @@ void DoC2k_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->writesize = 512;
+ mtd->writebufsize = mtd->writesize = 512;
mtd->oobsize = 16;
+ mtd->ecc_strength = 2;
mtd->owner = THIS_MODULE;
- mtd->erase = doc_erase;
- mtd->read = doc_read;
- mtd->write = doc_write;
- mtd->read_oob = doc_read_oob;
- mtd->write_oob = doc_write_oob;
+ mtd->_erase = doc_erase;
+ mtd->_read = doc_read;
+ mtd->_write = doc_write;
+ mtd->_read_oob = doc_read_oob;
+ mtd->_write_oob = doc_write_oob;
this->curfloor = -1;
this->curchip = -1;
mutex_init(&this->lock);
@@ -602,13 +603,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
int i, len256 = 0, ret=0;
size_t left = len;
- /* Don't allow read past end of device */
- if (from >= this->totlen)
- return -EINVAL;
-
mutex_lock(&this->lock);
-
- *retlen = 0;
while (left) {
len = left;
@@ -748,13 +743,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t left = len;
int status;
- /* Don't allow write past end of device */
- if (to >= this->totlen)
- return -EINVAL;
-
mutex_lock(&this->lock);
-
- *retlen = 0;
while (left) {
len = left;
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 7543b98f46c4..f6927955dab0 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -346,14 +346,15 @@ void DoCMil_init(struct mtd_info *mtd)
/* FIXME: erase size is not always 8KiB */
mtd->erasesize = 0x2000;
- mtd->writesize = 512;
+ mtd->writebufsize = mtd->writesize = 512;
mtd->oobsize = 16;
+ mtd->ecc_strength = 2;
mtd->owner = THIS_MODULE;
- mtd->erase = doc_erase;
- mtd->read = doc_read;
- mtd->write = doc_write;
- mtd->read_oob = doc_read_oob;
- mtd->write_oob = doc_write_oob;
+ mtd->_erase = doc_erase;
+ mtd->_read = doc_read;
+ mtd->_write = doc_write;
+ mtd->_read_oob = doc_read_oob;
+ mtd->_write_oob = doc_write_oob;
this->curfloor = -1;
this->curchip = -1;
@@ -383,10 +384,6 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
void __iomem *docptr = this->virtadr;
struct Nand *mychip = &this->chips[from >> (this->chipshift)];
- /* Don't allow read past end of device */
- if (from >= this->totlen)
- return -EINVAL;
-
/* Don't allow a single read to cross a 512-byte block boundary */
if (from + len > ((from | 0x1ff) + 1))
len = ((from | 0x1ff) + 1) - from;
@@ -494,10 +491,6 @@ static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
void __iomem *docptr = this->virtadr;
struct Nand *mychip = &this->chips[to >> (this->chipshift)];
- /* Don't allow write past end of device */
- if (to >= this->totlen)
- return -EINVAL;
-
#if 0
/* Don't allow a single write to cross a 512-byte block boundary */
if (to + len > ( (to | 0x1ff) + 1))
@@ -599,7 +592,6 @@ static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
printk("Error programming flash\n");
/* Error in programming
FIXME: implement Bad Block Replacement (in nftl.c ??) */
- *retlen = 0;
ret = -EIO;
}
dummy = ReadDOC(docptr, LastDataRead);
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 177510d0e7ee..04eb2e4aa50f 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -467,14 +467,15 @@ void DoCMilPlus_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->writesize = 512;
+ mtd->writebufsize = mtd->writesize = 512;
mtd->oobsize = 16;
+ mtd->ecc_strength = 2;
mtd->owner = THIS_MODULE;
- mtd->erase = doc_erase;
- mtd->read = doc_read;
- mtd->write = doc_write;
- mtd->read_oob = doc_read_oob;
- mtd->write_oob = doc_write_oob;
+ mtd->_erase = doc_erase;
+ mtd->_read = doc_read;
+ mtd->_write = doc_write;
+ mtd->_read_oob = doc_read_oob;
+ mtd->_write_oob = doc_write_oob;
this->curfloor = -1;
this->curchip = -1;
@@ -581,10 +582,6 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
void __iomem * docptr = this->virtadr;
struct Nand *mychip = &this->chips[from >> (this->chipshift)];
- /* Don't allow read past end of device */
- if (from >= this->totlen)
- return -EINVAL;
-
/* Don't allow a single read to cross a 512-byte block boundary */
if (from + len > ((from | 0x1ff) + 1))
len = ((from | 0x1ff) + 1) - from;
@@ -700,10 +697,6 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
void __iomem * docptr = this->virtadr;
struct Nand *mychip = &this->chips[to >> (this->chipshift)];
- /* Don't allow write past end of device */
- if (to >= this->totlen)
- return -EINVAL;
-
/* Don't allow writes which aren't exactly one block (512 bytes) */
if ((to & 0x1ff) || (len != 0x200))
return -EINVAL;
@@ -800,7 +793,6 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to);
/* Error in programming
FIXME: implement Bad Block Replacement (in nftl.c ??) */
- *retlen = 0;
ret = -EIO;
}
dummy = ReadDOC(docptr, Mplus_LastDataRead);
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index ad11ef0a81f4..8272c02668d6 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -80,14 +80,9 @@ static struct nand_ecclayout docg3_oobinfo = {
.oobavail = 8,
};
-/**
- * struct docg3_bch - BCH engine
- */
-static struct bch_control *docg3_bch;
-
static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
{
- u8 val = readb(docg3->base + reg);
+ u8 val = readb(docg3->cascade->base + reg);
trace_docg3_io(0, 8, reg, (int)val);
return val;
@@ -95,7 +90,7 @@ static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
{
- u16 val = readw(docg3->base + reg);
+ u16 val = readw(docg3->cascade->base + reg);
trace_docg3_io(0, 16, reg, (int)val);
return val;
@@ -103,13 +98,13 @@ static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
{
- writeb(val, docg3->base + reg);
+ writeb(val, docg3->cascade->base + reg);
trace_docg3_io(1, 8, reg, val);
}
static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
{
- writew(val, docg3->base + reg);
+ writew(val, docg3->cascade->base + reg);
trace_docg3_io(1, 16, reg, val);
}
@@ -643,7 +638,8 @@ static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
ecc[i] = bitrev8(hwecc[i]);
- numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES,
+ numerrs = decode_bch(docg3->cascade->bch, NULL,
+ DOC_ECC_BCH_COVERED_BYTES,
NULL, ecc, NULL, errorpos);
BUG_ON(numerrs == -EINVAL);
if (numerrs < 0)
@@ -734,7 +730,7 @@ err:
* doc_read_page_getbytes - Reads bytes from a prepared page
* @docg3: the device
* @len: the number of bytes to be read (must be a multiple of 4)
- * @buf: the buffer to be filled in
+ * @buf: the buffer to be filled in (or NULL to discard the bytes)
* @first: 1 if first time read, DOC_READADDRESS should be set
*
*/
@@ -849,7 +845,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct docg3 *docg3 = mtd->priv;
- int block0, block1, page, ret, ofs = 0;
+ int block0, block1, page, ret, skip, ofs = 0;
u8 *oobbuf = ops->oobbuf;
u8 *buf = ops->datbuf;
size_t len, ooblen, nbdata, nboob;
@@ -869,34 +865,36 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
from, ops->mode, buf, len, oobbuf, ooblen);
- if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % DOC_LAYOUT_OOB_SIZE) ||
- (from % DOC_LAYOUT_PAGE_SIZE))
+ if (ooblen % DOC_LAYOUT_OOB_SIZE)
return -EINVAL;
- ret = -EINVAL;
- calc_block_sector(from + len, &block0, &block1, &page, &ofs,
- docg3->reliable);
- if (block1 > docg3->max_block)
- goto err;
+ if (from + len > mtd->size)
+ return -EINVAL;
ops->oobretlen = 0;
ops->retlen = 0;
ret = 0;
+ skip = from % DOC_LAYOUT_PAGE_SIZE;
+ mutex_lock(&docg3->cascade->lock);
while (!ret && (len > 0 || ooblen > 0)) {
- calc_block_sector(from, &block0, &block1, &page, &ofs,
+ calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
docg3->reliable);
- nbdata = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+ nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
if (ret < 0)
- goto err;
+ goto out;
ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
if (ret < 0)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, nbdata, buf, 1);
+ ret = doc_read_page_getbytes(docg3, skip, NULL, 1);
+ if (ret < skip)
+ goto err_in_read;
+ ret = doc_read_page_getbytes(docg3, nbdata, buf, 0);
if (ret < nbdata)
goto err_in_read;
- doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata,
+ doc_read_page_getbytes(docg3,
+ DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
NULL, 0);
ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
if (ret < nboob)
@@ -950,13 +948,15 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
len -= nbdata;
ooblen -= nboob;
from += DOC_LAYOUT_PAGE_SIZE;
+ skip = 0;
}
+out:
+ mutex_unlock(&docg3->cascade->lock);
return ret;
err_in_read:
doc_read_page_finish(docg3);
-err:
- return ret;
+ goto out;
}
/**
@@ -1114,10 +1114,10 @@ static int doc_get_op_status(struct docg3 *docg3)
*/
static int doc_write_erase_wait_status(struct docg3 *docg3)
{
- int status, ret = 0;
+ int i, status, ret = 0;
- if (!doc_is_ready(docg3))
- usleep_range(3000, 3000);
+ for (i = 0; !doc_is_ready(docg3) && i < 5; i++)
+ msleep(20);
if (!doc_is_ready(docg3)) {
doc_dbg("Timeout reached and the chip is still not ready\n");
ret = -EAGAIN;
@@ -1196,18 +1196,19 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
int block0, block1, page, ret, ofs = 0;
doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);
- doc_set_device_id(docg3, docg3->device_id);
info->state = MTD_ERASE_PENDING;
calc_block_sector(info->addr + info->len, &block0, &block1, &page,
&ofs, docg3->reliable);
ret = -EINVAL;
- if (block1 > docg3->max_block || page || ofs)
+ if (info->addr + info->len > mtd->size || page || ofs)
goto reset_err;
ret = 0;
calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
docg3->reliable);
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
doc_set_reliable_mode(docg3);
for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
info->state = MTD_ERASING;
@@ -1215,6 +1216,7 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
block0 += 2;
block1 += 2;
}
+ mutex_unlock(&docg3->cascade->lock);
if (ret)
goto reset_err;
@@ -1401,7 +1403,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
struct mtd_oob_ops *ops)
{
struct docg3 *docg3 = mtd->priv;
- int block0, block1, page, ret, pofs = 0, autoecc, oobdelta;
+ int ret, autoecc, oobdelta;
u8 *oobbuf = ops->oobbuf;
u8 *buf = ops->datbuf;
size_t len, ooblen;
@@ -1438,12 +1440,8 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
if (len && ooblen &&
(len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
return -EINVAL;
-
- ret = -EINVAL;
- calc_block_sector(ofs + len, &block0, &block1, &page, &pofs,
- docg3->reliable);
- if (block1 > docg3->max_block)
- goto err;
+ if (ofs + len > mtd->size)
+ return -EINVAL;
ops->oobretlen = 0;
ops->retlen = 0;
@@ -1457,6 +1455,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
if (autoecc < 0)
return autoecc;
+ mutex_lock(&docg3->cascade->lock);
while (!ret && len > 0) {
memset(oob, 0, sizeof(oob));
if (ofs == docg3->oob_write_ofs)
@@ -1477,8 +1476,9 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
}
ops->retlen += DOC_LAYOUT_PAGE_SIZE;
}
-err:
+
doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
return ret;
}
@@ -1535,9 +1535,11 @@ static ssize_t dps0_is_key_locked(struct device *dev,
struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
int dps0;
+ mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
}
@@ -1548,9 +1550,11 @@ static ssize_t dps1_is_key_locked(struct device *dev,
struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
int dps1;
+ mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
}
@@ -1565,10 +1569,12 @@ static ssize_t dps0_insert_key(struct device *dev,
if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
return -EINVAL;
+ mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
return count;
}
@@ -1582,10 +1588,12 @@ static ssize_t dps1_insert_key(struct device *dev,
if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
return -EINVAL;
+ mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
return count;
}
@@ -1601,13 +1609,13 @@ static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
};
static int doc_register_sysfs(struct platform_device *pdev,
- struct mtd_info **floors)
+ struct docg3_cascade *cascade)
{
int ret = 0, floor, i = 0;
struct device *dev = &pdev->dev;
- for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && floors[floor];
- floor++)
+ for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS &&
+ cascade->floors[floor]; floor++)
for (i = 0; !ret && i < 4; i++)
ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
if (!ret)
@@ -1621,12 +1629,12 @@ static int doc_register_sysfs(struct platform_device *pdev,
}
static void doc_unregister_sysfs(struct platform_device *pdev,
- struct mtd_info **floors)
+ struct docg3_cascade *cascade)
{
struct device *dev = &pdev->dev;
int floor, i;
- for (floor = 0; floor < DOC_MAX_NBFLOORS && floors[floor];
+ for (floor = 0; floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
floor++)
for (i = 0; i < 4; i++)
device_remove_file(dev, &doc_sys_attrs[floor][i]);
@@ -1640,7 +1648,11 @@ static int dbg_flashctrl_show(struct seq_file *s, void *p)
struct docg3 *docg3 = (struct docg3 *)s->private;
int pos = 0;
- u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ u8 fctrl;
+
+ mutex_lock(&docg3->cascade->lock);
+ fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ mutex_unlock(&docg3->cascade->lock);
pos += seq_printf(s,
"FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
@@ -1658,9 +1670,12 @@ static int dbg_asicmode_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
- int pos = 0;
- int pctrl = doc_register_readb(docg3, DOC_ASICMODE);
- int mode = pctrl & 0x03;
+ int pos = 0, pctrl, mode;
+
+ mutex_lock(&docg3->cascade->lock);
+ pctrl = doc_register_readb(docg3, DOC_ASICMODE);
+ mode = pctrl & 0x03;
+ mutex_unlock(&docg3->cascade->lock);
pos += seq_printf(s,
"%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
@@ -1692,7 +1707,11 @@ static int dbg_device_id_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
int pos = 0;
- int id = doc_register_readb(docg3, DOC_DEVICESELECT);
+ int id;
+
+ mutex_lock(&docg3->cascade->lock);
+ id = doc_register_readb(docg3, DOC_DEVICESELECT);
+ mutex_unlock(&docg3->cascade->lock);
pos += seq_printf(s, "DeviceId = %d\n", id);
return pos;
@@ -1705,6 +1724,7 @@ static int dbg_protection_show(struct seq_file *s, void *p)
int pos = 0;
int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
+ mutex_lock(&docg3->cascade->lock);
protect = doc_register_readb(docg3, DOC_PROTECTION);
dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
@@ -1712,6 +1732,7 @@ static int dbg_protection_show(struct seq_file *s, void *p)
dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
+ mutex_unlock(&docg3->cascade->lock);
pos += seq_printf(s, "Protection = 0x%02x (",
protect);
@@ -1804,7 +1825,7 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
switch (chip_id) {
case DOC_CHIPID_G3:
- mtd->name = kasprintf(GFP_KERNEL, "DiskOnChip G3 floor %d",
+ mtd->name = kasprintf(GFP_KERNEL, "docg3.%d",
docg3->device_id);
docg3->max_block = 2047;
break;
@@ -1817,16 +1838,17 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
if (docg3->reliable == 2)
mtd->erasesize /= 2;
- mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
+ mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
mtd->owner = THIS_MODULE;
- mtd->erase = doc_erase;
- mtd->read = doc_read;
- mtd->write = doc_write;
- mtd->read_oob = doc_read_oob;
- mtd->write_oob = doc_write_oob;
- mtd->block_isbad = doc_block_isbad;
+ mtd->_erase = doc_erase;
+ mtd->_read = doc_read;
+ mtd->_write = doc_write;
+ mtd->_read_oob = doc_read_oob;
+ mtd->_write_oob = doc_write_oob;
+ mtd->_block_isbad = doc_block_isbad;
mtd->ecclayout = &docg3_oobinfo;
+ mtd->ecc_strength = DOC_ECC_BCH_T;
}
/**
@@ -1834,6 +1856,7 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
* @base: the io space where the device is probed
* @floor: the floor of the probed device
* @dev: the device
+ * @cascade: the cascade of chips this device will belong to
*
* Checks whether a device is available at the specified IO range and floor.
*
@@ -1841,8 +1864,8 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
* if a memory allocation failed. If floor 0 is checked, a reset of the ASIC is
* launched.
*/
-static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
- struct device *dev)
+static struct mtd_info * __init
+doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
{
int ret, bbt_nbpages;
u16 chip_id, chip_id_inv;
@@ -1865,7 +1888,7 @@ static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
docg3->dev = dev;
docg3->device_id = floor;
- docg3->base = base;
+ docg3->cascade = cascade;
doc_set_device_id(docg3, docg3->device_id);
if (!floor)
doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
@@ -1882,7 +1905,7 @@ static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
switch (chip_id) {
case DOC_CHIPID_G3:
doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
- base, floor);
+ docg3->cascade->base, floor);
break;
default:
doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
@@ -1927,10 +1950,12 @@ static void doc_release_device(struct mtd_info *mtd)
static int docg3_resume(struct platform_device *pdev)
{
int i;
+ struct docg3_cascade *cascade;
struct mtd_info **docg3_floors, *mtd;
struct docg3 *docg3;
- docg3_floors = platform_get_drvdata(pdev);
+ cascade = platform_get_drvdata(pdev);
+ docg3_floors = cascade->floors;
mtd = docg3_floors[0];
docg3 = mtd->priv;
@@ -1952,11 +1977,13 @@ static int docg3_resume(struct platform_device *pdev)
static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
{
int floor, i;
+ struct docg3_cascade *cascade;
struct mtd_info **docg3_floors, *mtd;
struct docg3 *docg3;
u8 ctrl, pwr_down;
- docg3_floors = platform_get_drvdata(pdev);
+ cascade = platform_get_drvdata(pdev);
+ docg3_floors = cascade->floors;
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
mtd = docg3_floors[floor];
if (!mtd)
@@ -2006,7 +2033,7 @@ static int __init docg3_probe(struct platform_device *pdev)
struct resource *ress;
void __iomem *base;
int ret, floor, found = 0;
- struct mtd_info **docg3_floors;
+ struct docg3_cascade *cascade;
ret = -ENXIO;
ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2017,17 +2044,19 @@ static int __init docg3_probe(struct platform_device *pdev)
base = ioremap(ress->start, DOC_IOSPACE_SIZE);
ret = -ENOMEM;
- docg3_floors = kzalloc(sizeof(*docg3_floors) * DOC_MAX_NBFLOORS,
- GFP_KERNEL);
- if (!docg3_floors)
+ cascade = kzalloc(sizeof(*cascade) * DOC_MAX_NBFLOORS,
+ GFP_KERNEL);
+ if (!cascade)
goto nomem1;
- docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+ cascade->base = base;
+ mutex_init(&cascade->lock);
+ cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
DOC_ECC_BCH_PRIMPOLY);
- if (!docg3_bch)
+ if (!cascade->bch)
goto nomem2;
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
- mtd = doc_probe_device(base, floor, dev);
+ mtd = doc_probe_device(cascade, floor, dev);
if (IS_ERR(mtd)) {
ret = PTR_ERR(mtd);
goto err_probe;
@@ -2038,7 +2067,7 @@ static int __init docg3_probe(struct platform_device *pdev)
else
continue;
}
- docg3_floors[floor] = mtd;
+ cascade->floors[floor] = mtd;
ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
0);
if (ret)
@@ -2046,26 +2075,26 @@ static int __init docg3_probe(struct platform_device *pdev)
found++;
}
- ret = doc_register_sysfs(pdev, docg3_floors);
+ ret = doc_register_sysfs(pdev, cascade);
if (ret)
goto err_probe;
if (!found)
goto notfound;
- platform_set_drvdata(pdev, docg3_floors);
- doc_dbg_register(docg3_floors[0]->priv);
+ platform_set_drvdata(pdev, cascade);
+ doc_dbg_register(cascade->floors[0]->priv);
return 0;
notfound:
ret = -ENODEV;
dev_info(dev, "No supported DiskOnChip found\n");
err_probe:
- free_bch(docg3_bch);
+ free_bch(cascade->bch);
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
- if (docg3_floors[floor])
- doc_release_device(docg3_floors[floor]);
+ if (cascade->floors[floor])
+ doc_release_device(cascade->floors[floor]);
nomem2:
- kfree(docg3_floors);
+ kfree(cascade);
nomem1:
iounmap(base);
noress:
@@ -2080,19 +2109,19 @@ noress:
*/
static int __exit docg3_release(struct platform_device *pdev)
{
- struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
- struct docg3 *docg3 = docg3_floors[0]->priv;
- void __iomem *base = docg3->base;
+ struct docg3_cascade *cascade = platform_get_drvdata(pdev);
+ struct docg3 *docg3 = cascade->floors[0]->priv;
+ void __iomem *base = cascade->base;
int floor;
- doc_unregister_sysfs(pdev, docg3_floors);
+ doc_unregister_sysfs(pdev, cascade);
doc_dbg_unregister(docg3);
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
- if (docg3_floors[floor])
- doc_release_device(docg3_floors[floor]);
+ if (cascade->floors[floor])
+ doc_release_device(cascade->floors[floor]);
- kfree(docg3_floors);
- free_bch(docg3_bch);
+ free_bch(docg3->cascade->bch);
+ kfree(cascade);
iounmap(base);
return 0;
}
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
index db0da436b493..19fb93f96a3a 100644
--- a/drivers/mtd/devices/docg3.h
+++ b/drivers/mtd/devices/docg3.h
@@ -22,6 +22,8 @@
#ifndef _MTD_DOCG3_H
#define _MTD_DOCG3_H
+#include <linux/mtd/mtd.h>
+
/*
* Flash memory areas :
* - 0x0000 .. 0x07ff : IPL
@@ -267,9 +269,23 @@
#define DOC_LAYOUT_DPS_KEY_LENGTH 8
/**
+ * struct docg3_cascade - Cascade of 1 to 4 docg3 chips
+ * @floors: floors (ie. one physical docg3 chip is one floor)
+ * @base: IO space to access all chips in the cascade
+ * @bch: the BCH correcting control structure
+ * @lock: lock to protect docg3 IO space from concurrent accesses
+ */
+struct docg3_cascade {
+ struct mtd_info *floors[DOC_MAX_NBFLOORS];
+ void __iomem *base;
+ struct bch_control *bch;
+ struct mutex lock;
+};
+
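All floors of a cascade share one register window, which is why the debugfs hunks earlier in this patch bracket each raw register read with cascade->lock. A hypothetical helper (not part of the patch) showing that locking discipline:

/* Hypothetical wrapper: serialize raw register access on the cascade-wide
 * mutex shared by every floor; doc_register_readb() is the driver's existing
 * low-level accessor. */
static u8 doc_register_readb_locked(struct docg3 *docg3, u16 reg)
{
	u8 val;

	mutex_lock(&docg3->cascade->lock);
	val = doc_register_readb(docg3, reg);
	mutex_unlock(&docg3->cascade->lock);
	return val;
}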
+/**
* struct docg3 - DiskOnChip driver private data
* @dev: the device currently under control
- * @base: mapped IO space
+ * @cascade: the cascade this device belongs to
* @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
* @if_cfg: if true, reads are on 16bits, else reads are on 8bits
@@ -287,7 +303,7 @@
*/
struct docg3 {
struct device *dev;
- void __iomem *base;
+ struct docg3_cascade *cascade;
unsigned int device_id:4;
unsigned int if_cfg:1;
unsigned int reliable:2;
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 3a11ea628e58..82bd00af5cc3 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -367,9 +367,6 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len);
#endif
- /* sanity checks */
- if (instr->addr + instr->len > mtd->size) return (-EINVAL);
-
/*
* check that both start and end of the requested erase are
* aligned with the erasesize at the appropriate addresses.
@@ -440,10 +437,6 @@ static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retle
printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len);
#endif
- /* sanity checks */
- if (!len) return (0);
- if (from + len > mtd->size) return (-EINVAL);
-
/* we always read len bytes */
*retlen = len;
@@ -522,11 +515,8 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen
printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len);
#endif
- *retlen = 0;
-
/* sanity checks */
if (!len) return (0);
- if (to + len > mtd->size) return (-EINVAL);
/* first, we write a 0xFF.... padded byte until we reach a dword boundary */
if (to & (BUSWIDTH - 1))
@@ -630,14 +620,15 @@ static int __init lart_flash_init (void)
mtd.name = module_name;
mtd.type = MTD_NORFLASH;
mtd.writesize = 1;
+ mtd.writebufsize = 4;
mtd.flags = MTD_CAP_NORFLASH;
mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
mtd.numeraseregions = ARRAY_SIZE(erase_regions);
mtd.eraseregions = erase_regions;
- mtd.erase = flash_erase;
- mtd.read = flash_read;
- mtd.write = flash_write;
+ mtd._erase = flash_erase;
+ mtd._read = flash_read;
+ mtd._write = flash_write;
mtd.owner = THIS_MODULE;
#ifdef LART_DEBUG
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 7c60dddbefc0..1924d247c1cb 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -288,9 +288,6 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
__func__, (long long)instr->addr,
(long long)instr->len);
- /* sanity checks */
- if (instr->addr + instr->len > flash->mtd.size)
- return -EINVAL;
div_u64_rem(instr->len, mtd->erasesize, &rem);
if (rem)
return -EINVAL;
@@ -349,13 +346,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
__func__, (u32)from, len);
- /* sanity checks */
- if (!len)
- return 0;
-
- if (from + len > flash->mtd.size)
- return -EINVAL;
-
spi_message_init(&m);
memset(t, 0, (sizeof t));
@@ -371,9 +361,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
t[1].len = len;
spi_message_add_tail(&t[1], &m);
- /* Byte count starts at zero. */
- *retlen = 0;
-
mutex_lock(&flash->lock);
/* Wait till previous write/erase is done. */
@@ -417,15 +404,6 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
__func__, (u32)to, len);
- *retlen = 0;
-
- /* sanity checks */
- if (!len)
- return(0);
-
- if (to + len > flash->mtd.size)
- return -EINVAL;
-
spi_message_init(&m);
memset(t, 0, (sizeof t));
@@ -509,15 +487,6 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
__func__, (u32)to, len);
- *retlen = 0;
-
- /* sanity checks */
- if (!len)
- return 0;
-
- if (to + len > flash->mtd.size)
- return -EINVAL;
-
spi_message_init(&m);
memset(t, 0, (sizeof t));
@@ -908,14 +877,14 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->mtd.writesize = 1;
flash->mtd.flags = MTD_CAP_NORFLASH;
flash->mtd.size = info->sector_size * info->n_sectors;
- flash->mtd.erase = m25p80_erase;
- flash->mtd.read = m25p80_read;
+ flash->mtd._erase = m25p80_erase;
+ flash->mtd._read = m25p80_read;
/* sst flash chips use AAI word program */
if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
- flash->mtd.write = sst_write;
+ flash->mtd._write = sst_write;
else
- flash->mtd.write = m25p80_write;
+ flash->mtd._write = m25p80_write;
/* prefer "small sector" erase if possible */
if (info->flags & SECT_4K) {
@@ -932,6 +901,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
ppdata.of_node = spi->dev.of_node;
flash->mtd.dev.parent = &spi->dev;
flash->page_size = info->page_size;
+ flash->mtd.writebufsize = flash->page_size;
if (info->addr_width)
flash->addr_width = info->addr_width;
@@ -1004,21 +974,7 @@ static struct spi_driver m25p80_driver = {
*/
};
-
-static int __init m25p80_init(void)
-{
- return spi_register_driver(&m25p80_driver);
-}
-
-
-static void __exit m25p80_exit(void)
-{
- spi_unregister_driver(&m25p80_driver);
-}
-
-
-module_init(m25p80_init);
-module_exit(m25p80_exit);
+module_spi_driver(m25p80_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mike Lavender");
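The removed init/exit boilerplate is folded into module_spi_driver(), which expands to roughly the following (illustrative expansion; the real macro generates its own function names):

/* Roughly what module_spi_driver(m25p80_driver) generates. */
static int __init m25p80_driver_init(void)
{
	return spi_register_driver(&m25p80_driver);
}
module_init(m25p80_driver_init);

static void __exit m25p80_driver_exit(void)
{
	spi_unregister_driver(&m25p80_driver);
}
module_exit(m25p80_driver_exit);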
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 8423fb6d4f26..182849d39c61 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -59,12 +59,8 @@ static int ms02nv_read(struct mtd_info *mtd, loff_t from,
{
struct ms02nv_private *mp = mtd->priv;
- if (from + len > mtd->size)
- return -EINVAL;
-
memcpy(buf, mp->uaddr + from, len);
*retlen = len;
-
return 0;
}
@@ -73,12 +69,8 @@ static int ms02nv_write(struct mtd_info *mtd, loff_t to,
{
struct ms02nv_private *mp = mtd->priv;
- if (to + len > mtd->size)
- return -EINVAL;
-
memcpy(mp->uaddr + to, buf, len);
*retlen = len;
-
return 0;
}
@@ -215,8 +207,8 @@ static int __init ms02nv_init_one(ulong addr)
mtd->size = fixsize;
mtd->name = (char *)ms02nv_name;
mtd->owner = THIS_MODULE;
- mtd->read = ms02nv_read;
- mtd->write = ms02nv_write;
+ mtd->_read = ms02nv_read;
+ mtd->_write = ms02nv_write;
mtd->writesize = 1;
ret = -EIO;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 236057ead0d2..928fb0e6d73a 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -164,9 +164,6 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
dev_name(&spi->dev), (long long)instr->addr,
(long long)instr->len);
- /* Sanity checks */
- if (instr->addr + instr->len > mtd->size)
- return -EINVAL;
div_u64_rem(instr->len, priv->page_size, &rem);
if (rem)
return -EINVAL;
@@ -252,14 +249,6 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev),
(unsigned)from, (unsigned)(from + len));
- *retlen = 0;
-
- /* Sanity checks */
- if (!len)
- return 0;
- if (from + len > mtd->size)
- return -EINVAL;
-
/* Calculate flash page/byte address */
addr = (((unsigned)from / priv->page_size) << priv->page_offset)
+ ((unsigned)from % priv->page_size);
@@ -328,14 +317,6 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
pr_debug("%s: write 0x%x..0x%x\n",
dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
- *retlen = 0;
-
- /* Sanity checks */
- if (!len)
- return 0;
- if ((to + len) > mtd->size)
- return -EINVAL;
-
spi_message_init(&msg);
x[0].tx_buf = command = priv->command;
@@ -490,8 +471,6 @@ static ssize_t otp_read(struct spi_device *spi, unsigned base,
if ((off + len) > 64)
len = 64 - off;
- if (len == 0)
- return len;
spi_message_init(&m);
@@ -611,16 +590,16 @@ static int dataflash_write_user_otp(struct mtd_info *mtd,
static char *otp_setup(struct mtd_info *device, char revision)
{
- device->get_fact_prot_info = dataflash_get_otp_info;
- device->read_fact_prot_reg = dataflash_read_fact_otp;
- device->get_user_prot_info = dataflash_get_otp_info;
- device->read_user_prot_reg = dataflash_read_user_otp;
+ device->_get_fact_prot_info = dataflash_get_otp_info;
+ device->_read_fact_prot_reg = dataflash_read_fact_otp;
+ device->_get_user_prot_info = dataflash_get_otp_info;
+ device->_read_user_prot_reg = dataflash_read_user_otp;
/* rev c parts (at45db321c and at45db1281 only!) use a
* different write procedure; not (yet?) implemented.
*/
if (revision > 'c')
- device->write_user_prot_reg = dataflash_write_user_otp;
+ device->_write_user_prot_reg = dataflash_write_user_otp;
return ", OTP";
}
@@ -672,9 +651,9 @@ add_dataflash_otp(struct spi_device *spi, char *name,
device->owner = THIS_MODULE;
device->type = MTD_DATAFLASH;
device->flags = MTD_WRITEABLE;
- device->erase = dataflash_erase;
- device->read = dataflash_read;
- device->write = dataflash_write;
+ device->_erase = dataflash_erase;
+ device->_read = dataflash_read;
+ device->_write = dataflash_write;
device->priv = priv;
device->dev.parent = &spi->dev;
@@ -946,18 +925,7 @@ static struct spi_driver dataflash_driver = {
/* FIXME: investigate suspend and resume... */
};
-static int __init dataflash_init(void)
-{
- return spi_register_driver(&dataflash_driver);
-}
-module_init(dataflash_init);
-
-static void __exit dataflash_exit(void)
-{
- spi_unregister_driver(&dataflash_driver);
-}
-module_exit(dataflash_exit);
-
+module_spi_driver(dataflash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrew Victor, David Brownell");
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 2562689ba6b4..ec59d65897fb 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -34,34 +34,23 @@ static struct mtd_info *mtd_info;
static int ram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
- if (instr->addr + instr->len > mtd->size)
- return -EINVAL;
-
memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
-
instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
-
return 0;
}
static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
- if (from + len > mtd->size)
- return -EINVAL;
-
- /* can we return a physical address with this driver? */
- if (phys)
- return -EINVAL;
-
*virt = mtd->priv + from;
*retlen = len;
return 0;
}
-static void ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
+ return 0;
}
/*
@@ -80,11 +69,7 @@ static unsigned long ram_get_unmapped_area(struct mtd_info *mtd,
static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
- if (from + len > mtd->size)
- return -EINVAL;
-
memcpy(buf, mtd->priv + from, len);
-
*retlen = len;
return 0;
}
@@ -92,11 +77,7 @@ static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
- if (to + len > mtd->size)
- return -EINVAL;
-
memcpy((char *)mtd->priv + to, buf, len);
-
*retlen = len;
return 0;
}
@@ -126,12 +107,12 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->priv = mapped_address;
mtd->owner = THIS_MODULE;
- mtd->erase = ram_erase;
- mtd->point = ram_point;
- mtd->unpoint = ram_unpoint;
- mtd->get_unmapped_area = ram_get_unmapped_area;
- mtd->read = ram_read;
- mtd->write = ram_write;
+ mtd->_erase = ram_erase;
+ mtd->_point = ram_point;
+ mtd->_unpoint = ram_unpoint;
+ mtd->_get_unmapped_area = ram_get_unmapped_area;
+ mtd->_read = ram_read;
+ mtd->_write = ram_write;
if (mtd_device_register(mtd, NULL, 0))
return -EIO;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 23423bd00b06..67823de68db6 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -33,45 +33,33 @@ struct phram_mtd_list {
static LIST_HEAD(phram_list);
-
static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
u_char *start = mtd->priv;
- if (instr->addr + instr->len > mtd->size)
- return -EINVAL;
-
memset(start + instr->addr, 0xff, instr->len);
- /* This'll catch a few races. Free the thing before returning :)
+ /*
+ * This'll catch a few races. Free the thing before returning :)
* I don't feel at all ashamed. This kind of thing is possible anyway
* with flash, but unlikely.
*/
-
instr->state = MTD_ERASE_DONE;
-
mtd_erase_callback(instr);
-
return 0;
}
static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
- if (from + len > mtd->size)
- return -EINVAL;
-
- /* can we return a physical address with this driver? */
- if (phys)
- return -EINVAL;
-
*virt = mtd->priv + from;
*retlen = len;
return 0;
}
-static void phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
+ return 0;
}
static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -79,14 +67,7 @@ static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
{
u_char *start = mtd->priv;
- if (from >= mtd->size)
- return -EINVAL;
-
- if (len > mtd->size - from)
- len = mtd->size - from;
-
memcpy(buf, start + from, len);
-
*retlen = len;
return 0;
}
@@ -96,20 +77,11 @@ static int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
{
u_char *start = mtd->priv;
- if (to >= mtd->size)
- return -EINVAL;
-
- if (len > mtd->size - to)
- len = mtd->size - to;
-
memcpy(start + to, buf, len);
-
*retlen = len;
return 0;
}
-
-
static void unregister_devices(void)
{
struct phram_mtd_list *this, *safe;
@@ -142,11 +114,11 @@ static int register_device(char *name, unsigned long start, unsigned long len)
new->mtd.name = name;
new->mtd.size = len;
new->mtd.flags = MTD_CAP_RAM;
- new->mtd.erase = phram_erase;
- new->mtd.point = phram_point;
- new->mtd.unpoint = phram_unpoint;
- new->mtd.read = phram_read;
- new->mtd.write = phram_write;
+ new->mtd._erase = phram_erase;
+ new->mtd._point = phram_point;
+ new->mtd._unpoint = phram_unpoint;
+ new->mtd._read = phram_read;
+ new->mtd._write = phram_write;
new->mtd.owner = THIS_MODULE;
new->mtd.type = MTD_RAM;
new->mtd.erasesize = PAGE_SIZE;
@@ -233,7 +205,17 @@ static inline void kill_final_newline(char *str)
return 1; \
} while (0)
-static int phram_setup(const char *val, struct kernel_param *kp)
+/*
+ * This shall contain the module parameter if any. It is of the form:
+ * - phram=<device>,<address>,<size> for module case
+ * - phram.phram=<device>,<address>,<size> for built-in case
+ * We leave 64 bytes for the device name, 12 for the address and 12 for the
+ * size.
+ * Example: phram.phram=rootfs,0xa0000000,512Mi
+ */
+static __initdata char phram_paramline[64+12+12];
+
+static int __init phram_setup(const char *val)
{
char buf[64+12+12], *str = buf;
char *token[3];
@@ -282,12 +264,28 @@ static int phram_setup(const char *val, struct kernel_param *kp)
return ret;
}
-module_param_call(phram, phram_setup, NULL, NULL, 000);
+static int __init phram_param_call(const char *val, struct kernel_param *kp)
+{
+ /*
+ * This function is always called before 'init_phram()', whether
+ * built-in or module.
+ */
+ if (strlen(val) >= sizeof(phram_paramline))
+ return -ENOSPC;
+ strcpy(phram_paramline, val);
+
+ return 0;
+}
+
+module_param_call(phram, phram_param_call, NULL, NULL, 000);
MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"");
static int __init init_phram(void)
{
+ if (phram_paramline[0])
+ return phram_setup(phram_paramline);
+
return 0;
}
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 5d53c5760a6c..0c51b988e1f8 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -94,12 +94,48 @@
#include <linux/ioctl.h>
#include <asm/io.h>
#include <linux/pci.h>
-
#include <linux/mtd/mtd.h>
-#include <linux/mtd/pmc551.h>
+
+#define PMC551_VERSION \
+ "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
+
+#define PCI_VENDOR_ID_V3_SEMI 0x11b0
+#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
+
+#define PMC551_PCI_MEM_MAP0 0x50
+#define PMC551_PCI_MEM_MAP1 0x54
+#define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000
+#define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0
+#define PMC551_PCI_MEM_MAP_REG_EN 0x00000002
+#define PMC551_PCI_MEM_MAP_ENABLE 0x00000001
+
+#define PMC551_SDRAM_MA 0x60
+#define PMC551_SDRAM_CMD 0x62
+#define PMC551_DRAM_CFG 0x64
+#define PMC551_SYS_CTRL_REG 0x78
+
+#define PMC551_DRAM_BLK0 0x68
+#define PMC551_DRAM_BLK1 0x6c
+#define PMC551_DRAM_BLK2 0x70
+#define PMC551_DRAM_BLK3 0x74
+#define PMC551_DRAM_BLK_GET_SIZE(x) (524288 << ((x >> 4) & 0x0f))
+#define PMC551_DRAM_BLK_SET_COL_MUX(x, v) (((x) & ~0x00007000) | (((v) & 0x7) << 12))
+#define PMC551_DRAM_BLK_SET_ROW_MUX(x, v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
+
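For reference, PMC551_DRAM_BLK_GET_SIZE() decodes the 4-bit aperture field in bits 7:4 of a DRAM block register; a worked example with an assumed register value:

/*
 * Worked example (assumed value): if bits 7:4 of a DRAM block register read
 * back as 3, PMC551_DRAM_BLK_GET_SIZE() yields 524288 << 3 = 4194304 bytes,
 * i.e. a 4 MiB aperture.
 */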
+struct mypriv {
+ struct pci_dev *dev;
+ u_char *start;
+ u32 base_map0;
+ u32 curr_map0;
+ u32 asize;
+ struct mtd_info *nextpmc551;
+};
static struct mtd_info *pmc551list;
+static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+
static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mypriv *priv = mtd->priv;
@@ -115,16 +151,6 @@ static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
#endif
end = instr->addr + instr->len - 1;
-
- /* Is it past the end? */
- if (end > mtd->size) {
-#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n",
- (long)end, (long)mtd->size);
-#endif
- return -EINVAL;
- }
-
eoff_hi = end & ~(priv->asize - 1);
soff_hi = instr->addr & ~(priv->asize - 1);
eoff_lo = end & (priv->asize - 1);
@@ -178,18 +204,6 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
#endif
- if (from + len > mtd->size) {
-#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n",
- (long)from + len, (long)mtd->size);
-#endif
- return -EINVAL;
- }
-
- /* can we return a physical address with this driver? */
- if (phys)
- return -EINVAL;
-
soff_hi = from & ~(priv->asize - 1);
soff_lo = from & (priv->asize - 1);
@@ -205,11 +219,12 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
return 0;
}
-static void pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_unpoint()\n");
#endif
+ return 0;
}
static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -228,16 +243,6 @@ static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
#endif
end = from + len - 1;
-
- /* Is it past the end? */
- if (end > mtd->size) {
-#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n",
- (long)end, (long)mtd->size);
-#endif
- return -EINVAL;
- }
-
soff_hi = from & ~(priv->asize - 1);
eoff_hi = end & ~(priv->asize - 1);
soff_lo = from & (priv->asize - 1);
@@ -295,16 +300,6 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
#endif
end = to + len - 1;
- /* Is it past the end? or did the u32 wrap? */
- if (end > mtd->size) {
-#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, "
- "size: %ld, to: %ld)\n", (long)end, (long)mtd->size,
- (long)to);
-#endif
- return -EINVAL;
- }
-
soff_hi = to & ~(priv->asize - 1);
eoff_hi = end & ~(priv->asize - 1);
soff_lo = to & (priv->asize - 1);
@@ -358,7 +353,7 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
* mechanism
* returns the size of the memory region found.
*/
-static u32 fixup_pmc551(struct pci_dev *dev)
+static int fixup_pmc551(struct pci_dev *dev)
{
#ifdef CONFIG_MTD_PMC551_BUGFIX
u32 dram_data;
@@ -668,7 +663,7 @@ static int __init init_pmc551(void)
struct mypriv *priv;
int found = 0;
struct mtd_info *mtd;
- u32 length = 0;
+ int length = 0;
if (msize) {
msize = (1 << (ffs(msize) - 1)) << 20;
@@ -786,11 +781,11 @@ static int __init init_pmc551(void)
mtd->size = msize;
mtd->flags = MTD_CAP_RAM;
- mtd->erase = pmc551_erase;
- mtd->read = pmc551_read;
- mtd->write = pmc551_write;
- mtd->point = pmc551_point;
- mtd->unpoint = pmc551_unpoint;
+ mtd->_erase = pmc551_erase;
+ mtd->_read = pmc551_read;
+ mtd->_write = pmc551_write;
+ mtd->_point = pmc551_point;
+ mtd->_unpoint = pmc551_unpoint;
mtd->type = MTD_RAM;
mtd->name = "PMC551 RAM board";
mtd->erasesize = 0x10000;
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 288594163c22..8f52fc858e48 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -75,7 +75,7 @@ static slram_mtd_list_t *slram_mtdlist = NULL;
static int slram_erase(struct mtd_info *, struct erase_info *);
static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **,
resource_size_t *);
-static void slram_unpoint(struct mtd_info *, loff_t, size_t);
+static int slram_unpoint(struct mtd_info *, loff_t, size_t);
static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -83,21 +83,13 @@ static int slram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
slram_priv_t *priv = mtd->priv;
- if (instr->addr + instr->len > mtd->size) {
- return(-EINVAL);
- }
-
memset(priv->start + instr->addr, 0xff, instr->len);
-
/* This'll catch a few races. Free the thing before returning :)
* I don't feel at all ashamed. This kind of thing is possible anyway
* with flash, but unlikely.
*/
-
instr->state = MTD_ERASE_DONE;
-
mtd_erase_callback(instr);
-
return(0);
}
@@ -106,20 +98,14 @@ static int slram_point(struct mtd_info *mtd, loff_t from, size_t len,
{
slram_priv_t *priv = mtd->priv;
- /* can we return a physical address with this driver? */
- if (phys)
- return -EINVAL;
-
- if (from + len > mtd->size)
- return -EINVAL;
-
*virt = priv->start + from;
*retlen = len;
return(0);
}
-static void slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
+ return 0;
}
static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -127,14 +113,7 @@ static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
{
slram_priv_t *priv = mtd->priv;
- if (from > mtd->size)
- return -EINVAL;
-
- if (from + len > mtd->size)
- len = mtd->size - from;
-
memcpy(buf, priv->start + from, len);
-
*retlen = len;
return(0);
}
@@ -144,11 +123,7 @@ static int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
{
slram_priv_t *priv = mtd->priv;
- if (to + len > mtd->size)
- return -EINVAL;
-
memcpy(priv->start + to, buf, len);
-
*retlen = len;
return(0);
}
@@ -199,11 +174,11 @@ static int register_device(char *name, unsigned long start, unsigned long length
(*curmtd)->mtdinfo->name = name;
(*curmtd)->mtdinfo->size = length;
(*curmtd)->mtdinfo->flags = MTD_CAP_RAM;
- (*curmtd)->mtdinfo->erase = slram_erase;
- (*curmtd)->mtdinfo->point = slram_point;
- (*curmtd)->mtdinfo->unpoint = slram_unpoint;
- (*curmtd)->mtdinfo->read = slram_read;
- (*curmtd)->mtdinfo->write = slram_write;
+ (*curmtd)->mtdinfo->_erase = slram_erase;
+ (*curmtd)->mtdinfo->_point = slram_point;
+ (*curmtd)->mtdinfo->_unpoint = slram_unpoint;
+ (*curmtd)->mtdinfo->_read = slram_read;
+ (*curmtd)->mtdinfo->_write = slram_write;
(*curmtd)->mtdinfo->owner = THIS_MODULE;
(*curmtd)->mtdinfo->type = MTD_RAM;
(*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
new file mode 100644
index 000000000000..797d43cd3550
--- /dev/null
+++ b/drivers/mtd/devices/spear_smi.c
@@ -0,0 +1,1147 @@
+/*
+ * SMI (Serial Memory Controller) device driver for Serial NOR Flash on
+ * SPEAr platform.
+ * The serial nor interface is largely based on drivers/mtd/m25p80.c;
+ * however, the SPI interface has been replaced by SMI.
+ *
+ * Copyright © 2010 STMicroelectronics.
+ * Ashish Priyadarshi
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spear_smi.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+/* SMI clock rate */
+#define SMI_MAX_CLOCK_FREQ 50000000 /* 50 MHz */
+
+/* Max timeout to safely come out of erase or write busy conditions */
+#define SMI_PROBE_TIMEOUT (HZ / 10)
+#define SMI_MAX_TIME_OUT (3 * HZ)
+
+/* timeout for command completion */
+#define SMI_CMD_TIMEOUT (HZ / 10)
+
+/* registers of smi */
+#define SMI_CR1 0x0 /* SMI control register 1 */
+#define SMI_CR2 0x4 /* SMI control register 2 */
+#define SMI_SR 0x8 /* SMI status register */
+#define SMI_TR 0xC /* SMI transmit register */
+#define SMI_RR 0x10 /* SMI receive register */
+
+/* defines for control_reg 1 */
+#define BANK_EN (0xF << 0) /* enables all banks */
+#define DSEL_TIME (0x6 << 4) /* Deselect time 6 + 1 SMI_CK periods */
+#define SW_MODE (0x1 << 28) /* enables SW Mode */
+#define WB_MODE (0x1 << 29) /* Write Burst Mode */
+#define FAST_MODE (0x1 << 15) /* Fast Mode */
+#define HOLD1 (0x1 << 16) /* Clock Hold period selection */
+
+/* defines for control_reg 2 */
+#define SEND (0x1 << 7) /* Send data */
+#define TFIE (0x1 << 8) /* Transmission Flag Interrupt Enable */
+#define WCIE (0x1 << 9) /* Write Complete Interrupt Enable */
+#define RD_STATUS_REG (0x1 << 10) /* reads status reg */
+#define WE (0x1 << 11) /* Write Enable */
+
+#define TX_LEN_SHIFT 0
+#define RX_LEN_SHIFT 4
+#define BANK_SHIFT 12
+
+/* defines for status register */
+#define SR_WIP 0x1 /* Write in progress */
+#define SR_WEL 0x2 /* Write enable latch */
+#define SR_BP0 0x4 /* Block protect 0 */
+#define SR_BP1 0x8 /* Block protect 1 */
+#define SR_BP2 0x10 /* Block protect 2 */
+#define SR_SRWD 0x80 /* SR write protect */
+#define TFF 0x100 /* Transfer Finished Flag */
+#define WCF 0x200 /* Write Complete Flag */
+#define ERF1 0x400 /* Forbidden Write Request */
+#define ERF2 0x800 /* Forbidden Access */
+
+#define WM_SHIFT 12
+
+/* flash opcodes */
+#define OPCODE_RDID 0x9f /* Read JEDEC ID */
+
+/* Flash Device Ids maintenance section */
+
+/* data structure to maintain flash ids from different vendors */
+struct flash_device {
+ char *name;
+ u8 erase_cmd;
+ u32 device_id;
+ u32 pagesize;
+ unsigned long sectorsize;
+ unsigned long size_in_bytes;
+};
+
+#define FLASH_ID(n, es, id, psize, ssize, size) \
+{ \
+ .name = n, \
+ .erase_cmd = es, \
+ .device_id = id, \
+ .pagesize = psize, \
+ .sectorsize = ssize, \
+ .size_in_bytes = size \
+}
+
+static struct flash_device flash_devices[] = {
+ FLASH_ID("st m25p16" , 0xd8, 0x00152020, 0x100, 0x10000, 0x200000),
+ FLASH_ID("st m25p32" , 0xd8, 0x00162020, 0x100, 0x10000, 0x400000),
+ FLASH_ID("st m25p64" , 0xd8, 0x00172020, 0x100, 0x10000, 0x800000),
+ FLASH_ID("st m25p128" , 0xd8, 0x00182020, 0x100, 0x40000, 0x1000000),
+ FLASH_ID("st m25p05" , 0xd8, 0x00102020, 0x80 , 0x8000 , 0x10000),
+ FLASH_ID("st m25p10" , 0xd8, 0x00112020, 0x80 , 0x8000 , 0x20000),
+ FLASH_ID("st m25p20" , 0xd8, 0x00122020, 0x100, 0x10000, 0x40000),
+ FLASH_ID("st m25p40" , 0xd8, 0x00132020, 0x100, 0x10000, 0x80000),
+ FLASH_ID("st m25p80" , 0xd8, 0x00142020, 0x100, 0x10000, 0x100000),
+ FLASH_ID("st m45pe10" , 0xd8, 0x00114020, 0x100, 0x10000, 0x20000),
+ FLASH_ID("st m45pe20" , 0xd8, 0x00124020, 0x100, 0x10000, 0x40000),
+ FLASH_ID("st m45pe40" , 0xd8, 0x00134020, 0x100, 0x10000, 0x80000),
+ FLASH_ID("st m45pe80" , 0xd8, 0x00144020, 0x100, 0x10000, 0x100000),
+ FLASH_ID("sp s25fl004" , 0xd8, 0x00120201, 0x100, 0x10000, 0x80000),
+ FLASH_ID("sp s25fl008" , 0xd8, 0x00130201, 0x100, 0x10000, 0x100000),
+ FLASH_ID("sp s25fl016" , 0xd8, 0x00140201, 0x100, 0x10000, 0x200000),
+ FLASH_ID("sp s25fl032" , 0xd8, 0x00150201, 0x100, 0x10000, 0x400000),
+ FLASH_ID("sp s25fl064" , 0xd8, 0x00160201, 0x100, 0x10000, 0x800000),
+ FLASH_ID("atmel 25f512" , 0x52, 0x0065001F, 0x80 , 0x8000 , 0x10000),
+ FLASH_ID("atmel 25f1024" , 0x52, 0x0060001F, 0x100, 0x8000 , 0x20000),
+ FLASH_ID("atmel 25f2048" , 0x52, 0x0063001F, 0x100, 0x10000, 0x40000),
+ FLASH_ID("atmel 25f4096" , 0x52, 0x0064001F, 0x100, 0x10000, 0x80000),
+ FLASH_ID("atmel 25fs040" , 0xd7, 0x0004661F, 0x100, 0x10000, 0x80000),
+ FLASH_ID("mac 25l512" , 0xd8, 0x001020C2, 0x010, 0x10000, 0x10000),
+ FLASH_ID("mac 25l1005" , 0xd8, 0x001120C2, 0x010, 0x10000, 0x20000),
+ FLASH_ID("mac 25l2005" , 0xd8, 0x001220C2, 0x010, 0x10000, 0x40000),
+ FLASH_ID("mac 25l4005" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
+ FLASH_ID("mac 25l4005a" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
+ FLASH_ID("mac 25l8005" , 0xd8, 0x001420C2, 0x010, 0x10000, 0x100000),
+ FLASH_ID("mac 25l1605" , 0xd8, 0x001520C2, 0x100, 0x10000, 0x200000),
+ FLASH_ID("mac 25l1605a" , 0xd8, 0x001520C2, 0x010, 0x10000, 0x200000),
+ FLASH_ID("mac 25l3205" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
+ FLASH_ID("mac 25l3205a" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
+ FLASH_ID("mac 25l6405" , 0xd8, 0x001720C2, 0x100, 0x10000, 0x800000),
+};
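The device_id values above appear to pack the three JEDEC RDID bytes into the low 24 bits, matching the val &= 0x00ffffff mask applied to the SMI receive register in spear_smi_probe_flash() below. A hypothetical helper making that layout explicit:

/* Hypothetical, for illustration only: manufacturer in bits 7:0, memory type
 * in bits 15:8, capacity in bits 23:16. */
#define SPEAR_SMI_JEDEC_ID(mfr, type, capacity) \
	(((u32)(capacity) << 16) | ((u32)(type) << 8) | (u32)(mfr))
/* e.g. SPEAR_SMI_JEDEC_ID(0x20, 0x20, 0x15) == 0x00152020, the "st m25p16" */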
+
+/* Define spear specific structures */
+
+struct spear_snor_flash;
+
+/**
+ * struct spear_smi - Structure for SMI Device
+ *
+ * @clk: functional clock
+ * @status: current status register of SMI.
+ * @clk_rate: functional clock rate of SMI (default: SMI_MAX_CLOCK_FREQ)
+ * @lock: lock to prevent parallel access of SMI.
+ * @io_base: base address for registers of SMI.
+ * @pdev: platform device
+ * @cmd_complete: queue to wait for command completion of NOR-flash.
+ * @num_flashes: number of flashes actually present on board.
+ * @flash: separate structure for each Serial NOR-flash attached to SMI.
+ */
+struct spear_smi {
+ struct clk *clk;
+ u32 status;
+ unsigned long clk_rate;
+ struct mutex lock;
+ void __iomem *io_base;
+ struct platform_device *pdev;
+ wait_queue_head_t cmd_complete;
+ u32 num_flashes;
+ struct spear_snor_flash *flash[MAX_NUM_FLASH_CHIP];
+};
+
+/**
+ * struct spear_snor_flash - Structure for Serial NOR Flash
+ *
+ * @bank: Bank number(0, 1, 2, 3) for each NOR-flash.
+ * @dev_id: Device ID of NOR-flash.
+ * @lock: lock to manage flash read, write and erase operations
+ * @mtd: MTD info for each NOR-flash.
+ * @num_parts: Total number of partitions in each bank of NOR-flash.
+ * @parts: Partition info for each bank of NOR-flash.
+ * @page_size: Page size of NOR-flash.
+ * @base_addr: Base address of NOR-flash.
+ * @erase_cmd: erase command may vary on different flash types
+ * @fast_mode: flash supports read in fast mode
+ */
+struct spear_snor_flash {
+ u32 bank;
+ u32 dev_id;
+ struct mutex lock;
+ struct mtd_info mtd;
+ u32 num_parts;
+ struct mtd_partition *parts;
+ u32 page_size;
+ void __iomem *base_addr;
+ u8 erase_cmd;
+ u8 fast_mode;
+};
+
+static inline struct spear_snor_flash *get_flash_data(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct spear_snor_flash, mtd);
+}
+
+/**
+ * spear_smi_read_sr - Read status register of flash through SMI
+ * @dev: structure of SMI information.
+ * @bank: bank to which flash is connected
+ *
+ * This routine will return the status register of the flash chip present at the
+ * given bank.
+ */
+static int spear_smi_read_sr(struct spear_smi *dev, u32 bank)
+{
+ int ret;
+ u32 ctrlreg1;
+
+ mutex_lock(&dev->lock);
+ dev->status = 0; /* Will be set in interrupt handler */
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ /* program smi in hw mode */
+ writel(ctrlreg1 & ~(SW_MODE | WB_MODE), dev->io_base + SMI_CR1);
+
+ /* performing a rsr instruction in hw mode */
+ writel((bank << BANK_SHIFT) | RD_STATUS_REG | TFIE,
+ dev->io_base + SMI_CR2);
+
+ /* wait for tff */
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+
+ /* copy dev->status (lower 16 bits) in order to release lock */
+ if (ret > 0)
+ ret = dev->status & 0xffff;
+ else
+ ret = -EIO;
+
+ /* restore the ctrl regs state */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ writel(0, dev->io_base + SMI_CR2);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+/**
+ * spear_smi_wait_till_ready - wait till flash is ready
+ * @dev: structure of SMI information.
+ * @bank: flash corresponding to this bank
+ * @timeout: timeout for busy wait condition
+ *
+ * This routine checks the WIP (write in progress) bit in the status register.
+ * If successful the routine returns 0, else -EBUSY.
+ */
+static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank,
+ unsigned long timeout)
+{
+ unsigned long finish;
+ int status;
+
+ finish = jiffies + timeout;
+ do {
+ status = spear_smi_read_sr(dev, bank);
+ if (status < 0)
+ continue; /* try till timeout */
+ else if (!(status & SR_WIP))
+ return 0;
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, finish));
+
+ dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n");
+ return status;
+}
+
+/**
+ * spear_smi_int_handler - SMI Interrupt Handler.
+ * @irq: irq number
+ * @dev_id: structure of SMI device, embedded in dev_id.
+ *
+ * The handler clears all interrupt conditions and records the status in
+ * dev->status which is used by the driver later.
+ */
+static irqreturn_t spear_smi_int_handler(int irq, void *dev_id)
+{
+ u32 status = 0;
+ struct spear_smi *dev = dev_id;
+
+ status = readl(dev->io_base + SMI_SR);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ /* clear all interrupt conditions */
+ writel(0, dev->io_base + SMI_SR);
+
+ /* copy the status register in dev->status */
+ dev->status |= status;
+
+ /* send the completion */
+ wake_up_interruptible(&dev->cmd_complete);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * spear_smi_hw_init - initializes the smi controller.
+ * @dev: structure of smi device
+ *
+ * this routine initializes the smi controller with the default values
+ */
+static void spear_smi_hw_init(struct spear_smi *dev)
+{
+ unsigned long rate = 0;
+ u32 prescale = 0;
+ u32 val;
+
+ rate = clk_get_rate(dev->clk);
+
+ /* functional clock of smi */
+ prescale = DIV_ROUND_UP(rate, dev->clk_rate);
+
+ /*
+ * setting the standard values, fast mode, prescaler for
+ * SMI_MAX_CLOCK_FREQ (50MHz) operation and bank enable
+ */
+ val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8);
+
+ mutex_lock(&dev->lock);
+ writel(val, dev->io_base + SMI_CR1);
+ mutex_unlock(&dev->lock);
+}
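A quick worked example of the prescaler computed in spear_smi_hw_init(), with an assumed input clock (the numbers are illustrative only):

/*
 * Assumed numbers: clk_get_rate() = 166 MHz, dev->clk_rate = 50 MHz.
 * prescale = DIV_ROUND_UP(166000000, 50000000) = 4, so the SMI clock runs at
 * 166 MHz / 4 = 41.5 MHz, i.e. at or below SMI_MAX_CLOCK_FREQ as intended.
 */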
+
+/**
+ * get_flash_index - match chip id from a flash list.
+ * @flash_id: a valid nor flash chip id obtained from board.
+ *
+ * Try to validate the chip id by matching it against the list; if it is not
+ * found, return a negative value. On success, return the index into the
+ * flash_devices array.
+ */
+static int get_flash_index(u32 flash_id)
+{
+ int index;
+
+ /* Matches chip-id to entire list of 'serial-nor flash' ids */
+ for (index = 0; index < ARRAY_SIZE(flash_devices); index++) {
+ if (flash_devices[index].device_id == flash_id)
+ return index;
+ }
+
+ /* Memory chip is not listed and not supported */
+ return -ENODEV;
+}
+
+/**
+ * spear_smi_write_enable - Enable the flash to do write operation
+ * @dev: structure of SMI device
+ * @bank: enable write for flash connected to this bank
+ *
+ * Set write enable latch with Write Enable command.
+ * Returns 0 on success.
+ */
+static int spear_smi_write_enable(struct spear_smi *dev, u32 bank)
+{
+ int ret;
+ u32 ctrlreg1;
+
+ mutex_lock(&dev->lock);
+ dev->status = 0; /* Will be set in interrupt handler */
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ /* program smi in h/w mode */
+ writel(ctrlreg1 & ~SW_MODE, dev->io_base + SMI_CR1);
+
+ /* give the flash, write enable command */
+ writel((bank << BANK_SHIFT) | WE | TFIE, dev->io_base + SMI_CR2);
+
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+
+ /* restore the ctrl regs state */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ writel(0, dev->io_base + SMI_CR2);
+
+ if (ret <= 0) {
+ ret = -EIO;
+ dev_err(&dev->pdev->dev,
+ "smi controller failed on write enable\n");
+ } else {
+ /* check whether write mode status is set for required bank */
+ if (dev->status & (1 << (bank + WM_SHIFT)))
+ ret = 0;
+ else {
+ dev_err(&dev->pdev->dev, "couldn't enable write\n");
+ ret = -EIO;
+ }
+ }
+
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static inline u32
+get_sector_erase_cmd(struct spear_snor_flash *flash, u32 offset)
+{
+ u32 cmd;
+ u8 *x = (u8 *)&cmd;
+
+ x[0] = flash->erase_cmd;
+ x[1] = offset >> 16;
+ x[2] = offset >> 8;
+ x[3] = offset;
+
+ return cmd;
+}
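The command word built in get_sector_erase_cmd() places the opcode first in memory, followed by the 24-bit sector address, most significant byte first. A hypothetical alternative with the same in-memory byte order:

/* Hypothetical equivalent of the open-coded byte layout above. */
static inline u32 get_sector_erase_cmd_alt(struct spear_snor_flash *flash,
					   u32 offset)
{
	return (__force u32)cpu_to_be32(((u32)flash->erase_cmd << 24) |
					(offset & 0x00ffffff));
}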
+
+/**
+ * spear_smi_erase_sector - erase one sector of flash
+ * @dev: structure of SMI information
+ * @command: erase command to be sent
+ * @bank: bank to which this command needs to be sent
+ * @bytes: size of command
+ *
+ * Erase one sector of flash memory at offset ``offset'' which is any
+ * address within the sector which should be erased.
+ * Returns 0 if successful, non-zero otherwise.
+ */
+static int spear_smi_erase_sector(struct spear_smi *dev,
+ u32 bank, u32 command, u32 bytes)
+{
+ u32 ctrlreg1 = 0;
+ int ret;
+
+ ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
+ if (ret)
+ return ret;
+
+ ret = spear_smi_write_enable(dev, bank);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ writel((ctrlreg1 | SW_MODE) & ~WB_MODE, dev->io_base + SMI_CR1);
+
+ /* send command in sw mode */
+ writel(command, dev->io_base + SMI_TR);
+
+ writel((bank << BANK_SHIFT) | SEND | TFIE | (bytes << TX_LEN_SHIFT),
+ dev->io_base + SMI_CR2);
+
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+
+ if (ret <= 0) {
+ ret = -EIO;
+ dev_err(&dev->pdev->dev, "sector erase failed\n");
+ } else
+ ret = 0; /* success */
+
+ /* restore ctrl regs */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ writel(0, dev->io_base + SMI_CR2);
+
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+/**
+ * spear_mtd_erase - perform flash erase operation as requested by user
+ * @mtd: Provides the memory characteristics
+ * @e_info: Provides the erase information
+ *
+ * Erase an address range on the flash chip. The address range may span
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int spear_mtd_erase(struct mtd_info *mtd, struct erase_info *e_info)
+{
+ struct spear_snor_flash *flash = get_flash_data(mtd);
+ struct spear_smi *dev = mtd->priv;
+ u32 addr, command, bank;
+ int len, ret;
+
+ if (!flash || !dev)
+ return -ENODEV;
+
+ bank = flash->bank;
+ if (bank > dev->num_flashes - 1) {
+ dev_err(&dev->pdev->dev, "Invalid Bank Num");
+ return -EINVAL;
+ }
+
+ addr = e_info->addr;
+ len = e_info->len;
+
+ mutex_lock(&flash->lock);
+
+ /* now erase sectors in loop */
+ while (len) {
+ command = get_sector_erase_cmd(flash, addr);
+ /* preparing the command for flash */
+ ret = spear_smi_erase_sector(dev, bank, command, 4);
+ if (ret) {
+ e_info->state = MTD_ERASE_FAILED;
+ mutex_unlock(&flash->lock);
+ return ret;
+ }
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+
+ mutex_unlock(&flash->lock);
+ e_info->state = MTD_ERASE_DONE;
+ mtd_erase_callback(e_info);
+
+ return 0;
+}
+
+/**
+ * spear_mtd_read - performs flash read operation as requested by the user
+ * @mtd: MTD information of the memory bank
+ * @from: Address from which to start read
+ * @len: Number of bytes to be read
+ * @retlen: Fills the Number of bytes actually read
+ * @buf: Fills this after reading
+ *
+ * Read an address range from the flash chip. The address range
+ * may be any size provided it is within the physical boundaries.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u8 *buf)
+{
+ struct spear_snor_flash *flash = get_flash_data(mtd);
+ struct spear_smi *dev = mtd->priv;
+ void *src;
+ u32 ctrlreg1, val;
+ int ret;
+
+ if (!flash || !dev)
+ return -ENODEV;
+
+ if (flash->bank > dev->num_flashes - 1) {
+ dev_err(&dev->pdev->dev, "Invalid Bank Num");
+ return -EINVAL;
+ }
+
+ /* select address as per bank number */
+ src = flash->base_addr + from;
+
+ mutex_lock(&flash->lock);
+
+ /* wait till previous write/erase is done. */
+ ret = spear_smi_wait_till_ready(dev, flash->bank, SMI_MAX_TIME_OUT);
+ if (ret) {
+ mutex_unlock(&flash->lock);
+ return ret;
+ }
+
+ mutex_lock(&dev->lock);
+ /* put smi in hw mode not wbt mode */
+ ctrlreg1 = val = readl(dev->io_base + SMI_CR1);
+ val &= ~(SW_MODE | WB_MODE);
+ if (flash->fast_mode)
+ val |= FAST_MODE;
+
+ writel(val, dev->io_base + SMI_CR1);
+
+ memcpy_fromio(buf, (u8 *)src, len);
+
+ /* restore ctrl reg1 */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ mutex_unlock(&dev->lock);
+
+ *retlen = len;
+ mutex_unlock(&flash->lock);
+
+ return 0;
+}
+
+static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
+ void *dest, const void *src, size_t len)
+{
+ int ret;
+ u32 ctrlreg1;
+
+ /* wait until finished previous write command. */
+ ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
+ if (ret)
+ return ret;
+
+ /* put smi in write enable */
+ ret = spear_smi_write_enable(dev, bank);
+ if (ret)
+ return ret;
+
+ /* put smi in hw, write burst mode */
+ mutex_lock(&dev->lock);
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);
+
+ memcpy_toio(dest, src, len);
+
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+
+ mutex_unlock(&dev->lock);
+ return 0;
+}
+
+/**
+ * spear_mtd_write - performs write operation as requested by the user.
+ * @mtd: MTD information of the memory bank.
+ * @to: Address to write.
+ * @len: Number of bytes to be written.
+ * @retlen: Number of bytes actually written.
+ * @buf: Buffer from which the data to be taken.
+ *
+ * Write an address range to the flash chip. Data must be written in
+ * flash_page_size chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u8 *buf)
+{
+ struct spear_snor_flash *flash = get_flash_data(mtd);
+ struct spear_smi *dev = mtd->priv;
+ void *dest;
+ u32 page_offset, page_size;
+ int ret;
+
+ if (!flash || !dev)
+ return -ENODEV;
+
+ if (flash->bank > dev->num_flashes - 1) {
+ dev_err(&dev->pdev->dev, "Invalid Bank Num");
+ return -EINVAL;
+ }
+
+ /* select address as per bank number */
+ dest = flash->base_addr + to;
+ mutex_lock(&flash->lock);
+
+ page_offset = (u32)to % flash->page_size;
+
+ /* do if all the bytes fit onto one page */
+ if (page_offset + len <= flash->page_size) {
+ ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf, len);
+ if (!ret)
+ *retlen += len;
+ } else {
+ u32 i;
+
+ /* the size of data remaining on the first page */
+ page_size = flash->page_size - page_offset;
+
+ ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf,
+ page_size);
+ if (ret)
+ goto err_write;
+ else
+ *retlen += page_size;
+
+ /* write everything in pagesize chunks */
+ for (i = page_size; i < len; i += page_size) {
+ page_size = len - i;
+ if (page_size > flash->page_size)
+ page_size = flash->page_size;
+
+ ret = spear_smi_cpy_toio(dev, flash->bank, dest + i,
+ buf + i, page_size);
+ if (ret)
+ break;
+ else
+ *retlen += page_size;
+ }
+ }
+
+err_write:
+ mutex_unlock(&flash->lock);
+
+ return ret;
+}
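The chunking logic in spear_mtd_write() splits a write at the first page boundary and then proceeds page by page; a worked example with assumed numbers:

/*
 * Assumed numbers: flash->page_size = 256, to = 0x1f0, len = 0x40.
 * page_offset = 0x1f0 % 256 = 0xf0, so the first copy covers
 * 256 - 0xf0 = 0x10 bytes up to the page boundary, and the loop then writes
 * the remaining 0x30 bytes as a second chunk.
 */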
+
+/**
+ * spear_smi_probe_flash - Detects the NOR Flash chip.
+ * @dev: structure of SMI information.
+ * @bank: bank on which flash must be probed
+ *
+ * This routine checks whether a flash chip exists on the given memory
+ * bank.
+ * Returns the index of the probed flash in the flash_devices table.
+ */
+static int spear_smi_probe_flash(struct spear_smi *dev, u32 bank)
+{
+ int ret;
+ u32 val = 0;
+
+ ret = spear_smi_wait_till_ready(dev, bank, SMI_PROBE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ dev->status = 0; /* Will be set in interrupt handler */
+ /* put smi in sw mode */
+ val = readl(dev->io_base + SMI_CR1);
+ writel(val | SW_MODE, dev->io_base + SMI_CR1);
+
+ /* send readid command in sw mode */
+ writel(OPCODE_RDID, dev->io_base + SMI_TR);
+
+ val = (bank << BANK_SHIFT) | SEND | (1 << TX_LEN_SHIFT) |
+ (3 << RX_LEN_SHIFT) | TFIE;
+ writel(val, dev->io_base + SMI_CR2);
+
+ /* wait for TFF */
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+ if (ret <= 0) {
+ ret = -ENODEV;
+ goto err_probe;
+ }
+
+ /* get memory chip id */
+ val = readl(dev->io_base + SMI_RR);
+ val &= 0x00ffffff;
+ ret = get_flash_index(val);
+
+err_probe:
+ /* clear sw mode */
+ val = readl(dev->io_base + SMI_CR1);
+ writel(val & ~SW_MODE, dev->io_base + SMI_CR1);
+
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+
+#ifdef CONFIG_OF
+static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node *pp = NULL;
+ const __be32 *addr;
+ u32 val;
+ int len;
+ int i = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ of_property_read_u32(np, "clock-rate", &val);
+ pdata->clk_rate = val;
+
+ pdata->board_flash_info = devm_kzalloc(&pdev->dev,
+ sizeof(*pdata->board_flash_info),
+ GFP_KERNEL);
+
+ /* Fill structs for each subnode (flash device) */
+ while ((pp = of_get_next_child(np, pp))) {
+ struct spear_smi_flash_info *flash_info;
+
+ flash_info = &pdata->board_flash_info[i];
+ pdata->np[i] = pp;
+
+ /* Read base-addr and size from DT */
+ addr = of_get_property(pp, "reg", &len);
+ pdata->board_flash_info->mem_base = be32_to_cpup(&addr[0]);
+ pdata->board_flash_info->size = be32_to_cpup(&addr[1]);
+
+ if (of_get_property(pp, "st,smi-fast-mode", NULL))
+ pdata->board_flash_info->fast_mode = 1;
+
+ i++;
+ }
+
+ pdata->num_flashes = i;
+
+ return 0;
+}
+#else
+static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ return -ENOSYS;
+}
+#endif
+
+static int spear_smi_setup_banks(struct platform_device *pdev,
+ u32 bank, struct device_node *np)
+{
+ struct spear_smi *dev = platform_get_drvdata(pdev);
+ struct mtd_part_parser_data ppdata = {};
+ struct spear_smi_flash_info *flash_info;
+ struct spear_smi_plat_data *pdata;
+ struct spear_snor_flash *flash;
+ struct mtd_partition *parts = NULL;
+ int count = 0;
+ int flash_index;
+ int ret = 0;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (bank > pdata->num_flashes - 1)
+ return -EINVAL;
+
+ flash_info = &pdata->board_flash_info[bank];
+ if (!flash_info)
+ return -ENODEV;
+
+ flash = kzalloc(sizeof(*flash), GFP_ATOMIC);
+ if (!flash)
+ return -ENOMEM;
+ flash->bank = bank;
+ flash->fast_mode = flash_info->fast_mode ? 1 : 0;
+ mutex_init(&flash->lock);
+
+ /* verify whether nor flash is really present on board */
+ flash_index = spear_smi_probe_flash(dev, bank);
+ if (flash_index < 0) {
+ dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank);
+ ret = flash_index;
+ goto err_probe;
+ }
+ /* map the memory for nor flash chip */
+ flash->base_addr = ioremap(flash_info->mem_base, flash_info->size);
+ if (!flash->base_addr) {
+ ret = -EIO;
+ goto err_probe;
+ }
+
+ dev->flash[bank] = flash;
+ flash->mtd.priv = dev;
+
+ if (flash_info->name)
+ flash->mtd.name = flash_info->name;
+ else
+ flash->mtd.name = flash_devices[flash_index].name;
+
+ flash->mtd.type = MTD_NORFLASH;
+ flash->mtd.writesize = 1;
+ flash->mtd.flags = MTD_CAP_NORFLASH;
+ flash->mtd.size = flash_info->size;
+ flash->mtd.erasesize = flash_devices[flash_index].sectorsize;
+ flash->page_size = flash_devices[flash_index].pagesize;
+ flash->mtd.writebufsize = flash->page_size;
+ flash->erase_cmd = flash_devices[flash_index].erase_cmd;
+ flash->mtd._erase = spear_mtd_erase;
+ flash->mtd._read = spear_mtd_read;
+ flash->mtd._write = spear_mtd_write;
+ flash->dev_id = flash_devices[flash_index].device_id;
+
+ dev_info(&dev->pdev->dev, "mtd .name=%s .size=%llx(%lluM)\n",
+ flash->mtd.name, flash->mtd.size,
+ flash->mtd.size / (1024 * 1024));
+
+ dev_info(&dev->pdev->dev, ".erasesize = 0x%x(%uK)\n",
+ flash->mtd.erasesize, flash->mtd.erasesize / 1024);
+
+#ifndef CONFIG_OF
+ if (flash_info->partitions) {
+ parts = flash_info->partitions;
+ count = flash_info->nr_partitions;
+ }
+#endif
+ ppdata.of_node = np;
+
+ ret = mtd_device_parse_register(&flash->mtd, NULL, &ppdata, parts,
+ count);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret);
+ goto err_map;
+ }
+
+ return 0;
+
+err_map:
+ iounmap(flash->base_addr);
+
+err_probe:
+ kfree(flash);
+ return ret;
+}
+
+/**
+ * spear_smi_probe - Entry routine
+ * @pdev: platform device structure
+ *
+ * This is the first routine which gets invoked during booting and does all
+ * initialization/allocation work. The routine looks for available memory banks
+ * and does the proper init for each one found.
+ * Returns 0 on success, non zero otherwise
+ */
+static int __devinit spear_smi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spear_smi_plat_data *pdata = NULL;
+ struct spear_smi *dev;
+ struct resource *smi_base;
+ int irq, ret = 0;
+ int i;
+
+ if (np) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("%s: ERROR: no memory", __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+ pdev->dev.platform_data = pdata;
+ ret = spear_smi_probe_config_dt(pdev, np);
+ if (ret) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "no platform data\n");
+ goto err;
+ }
+ } else {
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "no platform data\n");
+ goto err;
+ }
+ }
+
+ smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!smi_base) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "invalid smi base address\n");
+ goto err;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "invalid smi irq\n");
+ goto err;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_ATOMIC);
+ if (!dev) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "mem alloc fail\n");
+ goto err;
+ }
+
+ smi_base = request_mem_region(smi_base->start, resource_size(smi_base),
+ pdev->name);
+ if (!smi_base) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev, "request mem region fail\n");
+ goto err_mem;
+ }
+
+ dev->io_base = ioremap(smi_base->start, resource_size(smi_base));
+ if (!dev->io_base) {
+ ret = -EIO;
+ dev_err(&pdev->dev, "ioremap fail\n");
+ goto err_ioremap;
+ }
+
+ dev->pdev = pdev;
+ dev->clk_rate = pdata->clk_rate;
+
+ if (dev->clk_rate < 0 || dev->clk_rate > SMI_MAX_CLOCK_FREQ)
+ dev->clk_rate = SMI_MAX_CLOCK_FREQ;
+
+ dev->num_flashes = pdata->num_flashes;
+
+ if (dev->num_flashes > MAX_NUM_FLASH_CHIP) {
+ dev_err(&pdev->dev, "exceeding max number of flashes\n");
+ dev->num_flashes = MAX_NUM_FLASH_CHIP;
+ }
+
+ dev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ ret = PTR_ERR(dev->clk);
+ goto err_clk;
+ }
+
+ ret = clk_enable(dev->clk);
+ if (ret)
+ goto err_clk_enable;
+
+ ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
+ goto err_irq;
+ }
+
+ mutex_init(&dev->lock);
+ init_waitqueue_head(&dev->cmd_complete);
+ spear_smi_hw_init(dev);
+ platform_set_drvdata(pdev, dev);
+
+ /* loop for each serial nor-flash which is connected to smi */
+ for (i = 0; i < dev->num_flashes; i++) {
+ ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "bank setup failed\n");
+ goto err_bank_setup;
+ }
+ }
+
+ return 0;
+
+err_bank_setup:
+ free_irq(irq, dev);
+ platform_set_drvdata(pdev, NULL);
+err_irq:
+ clk_disable(dev->clk);
+err_clk_enable:
+ clk_put(dev->clk);
+err_clk:
+ iounmap(dev->io_base);
+err_ioremap:
+ release_mem_region(smi_base->start, resource_size(smi_base));
+err_mem:
+ kfree(dev);
+err:
+ return ret;
+}
+
+/**
+ * spear_smi_remove - Exit routine
+ * @pdev: platform device structure
+ *
+ * Frees all allocations and deletes the partitions.
+ */
+static int __devexit spear_smi_remove(struct platform_device *pdev)
+{
+ struct spear_smi *dev;
+ struct spear_smi_plat_data *pdata;
+ struct spear_snor_flash *flash;
+ struct resource *smi_base;
+ int ret;
+ int i, irq;
+
+ dev = platform_get_drvdata(pdev);
+ if (!dev) {
+ dev_err(&pdev->dev, "dev is null\n");
+ return -ENODEV;
+ }
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ /* clean up for all nor flash */
+ for (i = 0; i < dev->num_flashes; i++) {
+ flash = dev->flash[i];
+ if (!flash)
+ continue;
+
+ /* clean up mtd stuff */
+ ret = mtd_device_unregister(&flash->mtd);
+ if (ret)
+ dev_err(&pdev->dev, "error removing mtd\n");
+
+ iounmap(flash->base_addr);
+ kfree(flash);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ free_irq(irq, dev);
+
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ iounmap(dev->io_base);
+ kfree(dev);
+
+ smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(smi_base->start, resource_size(smi_base));
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct spear_smi *dev = platform_get_drvdata(pdev);
+
+ if (dev && dev->clk)
+ clk_disable(dev->clk);
+
+ return 0;
+}
+
+int spear_smi_resume(struct platform_device *pdev)
+{
+ struct spear_smi *dev = platform_get_drvdata(pdev);
+ int ret = -EPERM;
+
+ if (dev && dev->clk)
+ ret = clk_enable(dev->clk);
+
+ if (!ret)
+ spear_smi_hw_init(dev);
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id spear_smi_id_table[] = {
+ { .compatible = "st,spear600-smi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spear_smi_id_table);
+#endif
+
+static struct platform_driver spear_smi_driver = {
+ .driver = {
+ .name = "smi",
+ .bus = &platform_bus_type,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spear_smi_id_table),
+ },
+ .probe = spear_smi_probe,
+ .remove = __devexit_p(spear_smi_remove),
+ .suspend = spear_smi_suspend,
+ .resume = spear_smi_resume,
+};
+
+static int spear_smi_init(void)
+{
+ return platform_driver_register(&spear_smi_driver);
+}
+module_init(spear_smi_init);
+
+static void spear_smi_exit(void)
+{
+ platform_driver_unregister(&spear_smi_driver);
+}
+module_exit(spear_smi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>");
+MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips");
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 5fc198350b94..ab8a2f4c8d60 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -175,9 +175,6 @@ static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr)
int err;
/* Sanity checks */
- if (instr->addr + instr->len > flash->mtd.size)
- return -EINVAL;
-
if ((uint32_t)instr->len % mtd->erasesize)
return -EINVAL;
@@ -223,16 +220,6 @@ static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len,
unsigned char command[4];
int ret;
- /* Sanity checking */
- if (len == 0)
- return 0;
-
- if (from + len > flash->mtd.size)
- return -EINVAL;
-
- if (retlen)
- *retlen = 0;
-
spi_message_init(&message);
memset(&transfer, 0, sizeof(transfer));
@@ -274,13 +261,6 @@ static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len,
int i, j, ret, bytes, copied = 0;
unsigned char command[5];
- /* Sanity checks */
- if (!len)
- return 0;
-
- if (to + len > flash->mtd.size)
- return -EINVAL;
-
if ((uint32_t)to % mtd->writesize)
return -EINVAL;
@@ -402,10 +382,11 @@ static int __devinit sst25l_probe(struct spi_device *spi)
flash->mtd.flags = MTD_CAP_NORFLASH;
flash->mtd.erasesize = flash_info->erase_size;
flash->mtd.writesize = flash_info->page_size;
+ flash->mtd.writebufsize = flash_info->page_size;
flash->mtd.size = flash_info->page_size * flash_info->nr_pages;
- flash->mtd.erase = sst25l_erase;
- flash->mtd.read = sst25l_read;
- flash->mtd.write = sst25l_write;
+ flash->mtd._erase = sst25l_erase;
+ flash->mtd._read = sst25l_read;
+ flash->mtd._write = sst25l_write;
dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
(long long)flash->mtd.size >> 10);
@@ -418,9 +399,9 @@ static int __devinit sst25l_probe(struct spi_device *spi)
flash->mtd.numeraseregions);
- ret = mtd_device_parse_register(&flash->mtd, NULL, 0,
- data ? data->parts : NULL,
- data ? data->nr_parts : 0);
+ ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
+ data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
if (ret) {
kfree(flash);
dev_set_drvdata(&spi->dev, NULL);
@@ -450,18 +431,7 @@ static struct spi_driver sst25l_driver = {
.remove = __devexit_p(sst25l_remove),
};
-static int __init sst25l_init(void)
-{
- return spi_register_driver(&sst25l_driver);
-}
-
-static void __exit sst25l_exit(void)
-{
- spi_unregister_driver(&sst25l_driver);
-}
-
-module_init(sst25l_init);
-module_exit(sst25l_exit);
+module_spi_driver(sst25l_driver);
MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, "
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 28646c95cfb8..3af351484098 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -56,7 +56,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
if (memcmp(mtd->name, "DiskOnChip", 10))
return;
- if (!mtd->block_isbad) {
+ if (!mtd->_block_isbad) {
printk(KERN_ERR
"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
"Please use the new diskonchip driver under the NAND subsystem.\n");
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 536bbceaeaad..d3cfe26beeaa 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -40,7 +40,7 @@ static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
size_t *retlen, void **mtdbuf, resource_size_t *phys);
-static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
+static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
static int get_chip(struct map_info *map, struct flchip *chip, int mode);
static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
static void put_chip(struct map_info *map, struct flchip *chip);
@@ -63,18 +63,18 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
mtd->type = MTD_NORFLASH;
/* Fill in the default mtd operations */
- mtd->read = lpddr_read;
+ mtd->_read = lpddr_read;
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_CAP_NORFLASH;
mtd->flags &= ~MTD_BIT_WRITEABLE;
- mtd->erase = lpddr_erase;
- mtd->write = lpddr_write_buffers;
- mtd->writev = lpddr_writev;
- mtd->lock = lpddr_lock;
- mtd->unlock = lpddr_unlock;
+ mtd->_erase = lpddr_erase;
+ mtd->_write = lpddr_write_buffers;
+ mtd->_writev = lpddr_writev;
+ mtd->_lock = lpddr_lock;
+ mtd->_unlock = lpddr_unlock;
if (map_is_linear(map)) {
- mtd->point = lpddr_point;
- mtd->unpoint = lpddr_unpoint;
+ mtd->_point = lpddr_point;
+ mtd->_unpoint = lpddr_unpoint;
}
mtd->size = 1 << lpddr->qinfo->DevSizeShift;
mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
@@ -530,14 +530,12 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
struct flchip *chip = &lpddr->chips[chipnum];
int ret = 0;
- if (!map->virt || (adr + len > mtd->size))
+ if (!map->virt)
return -EINVAL;
/* ofs: offset within the first chip that the first read should start */
ofs = adr - (chipnum << lpddr->chipshift);
-
*mtdbuf = (void *)map->virt + chip->start + ofs;
- *retlen = 0;
while (len) {
unsigned long thislen;
@@ -575,11 +573,11 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
return 0;
}
-static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
+static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
{
struct map_info *map = mtd->priv;
struct lpddr_private *lpddr = map->fldrv_priv;
- int chipnum = adr >> lpddr->chipshift;
+ int chipnum = adr >> lpddr->chipshift, err = 0;
unsigned long ofs;
/* ofs: offset within the first chip that the first read should start */
@@ -603,9 +601,11 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
chip->ref_point_counter--;
if (chip->ref_point_counter == 0)
chip->state = FL_READY;
- } else
+ } else {
printk(KERN_WARNING "%s: Warning: unpoint called on non"
"pointed region\n", map->name);
+ err = -EINVAL;
+ }
put_chip(map, chip);
mutex_unlock(&chip->mutex);
@@ -614,6 +614,8 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
ofs = 0;
chipnum++;
}
+
+ return err;
}
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
@@ -637,13 +639,11 @@ static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
int chipnum;
unsigned long ofs, vec_seek, i;
int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
-
size_t len = 0;
for (i = 0; i < count; i++)
len += vecs[i].iov_len;
- *retlen = 0;
if (!len)
return 0;
@@ -688,9 +688,6 @@ static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
ofs = instr->addr;
len = instr->len;
- if (ofs > mtd->size || (len + ofs) > mtd->size)
- return -EINVAL;
-
while (len > 0) {
ret = do_erase_oneblock(mtd, ofs);
if (ret)
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 650126c361f1..ef5cde84a8b3 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -164,8 +164,8 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return -ENXIO;
}
- mtd_device_parse_register(state->mtd, part_probe_types, 0,
- pdata->parts, pdata->nr_parts);
+ mtd_device_parse_register(state->mtd, part_probe_types, NULL,
+ pdata->parts, pdata->nr_parts);
platform_set_drvdata(pdev, state);
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index f43b365b848c..080f06053bd4 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -196,7 +196,7 @@ static int __init init_dc21285(void)
dc21285_mtd->owner = THIS_MODULE;
- mtd_device_parse_register(dc21285_mtd, probes, 0, NULL, 0);
+ mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0);
if(machine_is_ebsa285()) {
/*
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 33cce895859f..e4de96ba52b3 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -252,8 +252,8 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
}
- mtd_device_parse_register(state->mtd, part_probe_types, 0,
- pdata->parts, pdata->nr_parts);
+ mtd_device_parse_register(state->mtd, part_probe_types, NULL,
+ pdata->parts, pdata->nr_parts);
return 0;
}
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 49c14187fc66..8ed6cb4529d8 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -85,8 +85,8 @@ static int __init h720x_mtd_init(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
- mtd_device_parse_register(mymtd, NULL, 0,
- h720x_partitions, NUM_PARTITIONS);
+ mtd_device_parse_register(mymtd, NULL, NULL,
+ h720x_partitions, NUM_PARTITIONS);
return 0;
}
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index f47aedb24366..834a06c56f56 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -91,7 +91,7 @@ static int __init init_impa7(void)
if (impa7_mtd[i]) {
impa7_mtd[i]->owner = THIS_MODULE;
devicesfound++;
- mtd_device_parse_register(impa7_mtd[i], NULL, 0,
+ mtd_device_parse_register(impa7_mtd[i], NULL, NULL,
partitions,
ARRAY_SIZE(partitions));
}
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 08c239604ee4..92e1f41634c7 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -72,7 +72,7 @@ static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
{
/* register the flash bank */
/* partition the flash bank */
- return mtd_device_parse_register(p->info, NULL, 0, NULL, 0);
+ return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
}
static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index fc7d4d0d9a4e..4a41ced0f710 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -226,7 +226,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
}
info->mtd->owner = THIS_MODULE;
- err = mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
+ err = mtd_device_parse_register(info->mtd, probes, NULL, NULL, 0);
if (err)
goto Error;
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 8b5410162d70..e864fc6c58f9 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -182,6 +182,9 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
{
struct flash_platform_data *plat = dev->dev.platform_data;
struct ixp4xx_flash_info *info;
+ struct mtd_part_parser_data ppdata = {
+ .origin = dev->resource->start,
+ };
int err = -1;
if (!plat)
@@ -247,7 +250,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
/* Use the fast version */
info->map.write = ixp4xx_write16;
- err = mtd_device_parse_register(info->mtd, probes, dev->resource->start,
+ err = mtd_device_parse_register(info->mtd, probes, &ppdata,
plat->parts, plat->nr_parts);
if (err) {
printk(KERN_ERR "Could not parse partitions\n");
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index dd0360ba2412..74bd98ee635f 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -27,17 +27,21 @@ static struct mtd_info *mymtd;
/* Is this really the vpp port? */
+static DEFINE_SPINLOCK(l440gx_vpp_lock);
+static int l440gx_vpp_refcnt;
static void l440gx_set_vpp(struct map_info *map, int vpp)
{
- unsigned long l;
+ unsigned long flags;
- l = inl(VPP_PORT);
+ spin_lock_irqsave(&l440gx_vpp_lock, flags);
if (vpp) {
- l |= 1;
+ if (++l440gx_vpp_refcnt == 1) /* first nested 'on' */
+ outl(inl(VPP_PORT) | 1, VPP_PORT);
} else {
- l &= ~1;
+ if (--l440gx_vpp_refcnt == 0) /* last nested 'off' */
+ outl(inl(VPP_PORT) & ~1, VPP_PORT);
}
- outl(l, VPP_PORT);
+ spin_unlock_irqrestore(&l440gx_vpp_lock, flags);
}
static struct map_info l440gx_map = {
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 7b889de9477b..b5401e355745 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -45,6 +45,7 @@ struct ltq_mtd {
};
static char ltq_map_name[] = "ltq_nor";
+static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL };
static map_word
ltq_read16(struct map_info *map, unsigned long adr)
@@ -168,8 +169,9 @@ ltq_mtd_probe(struct platform_device *pdev)
cfi->addr_unlock1 ^= 1;
cfi->addr_unlock2 ^= 1;
- err = mtd_device_parse_register(ltq_mtd->mtd, NULL, 0,
- ltq_mtd_data->parts, ltq_mtd_data->nr_parts);
+ err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL,
+ ltq_mtd_data->parts,
+ ltq_mtd_data->nr_parts);
if (err) {
dev_err(&pdev->dev, "failed to add partitions\n");
goto err_destroy;
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 8fed58e3a4a8..3c7ad17fca78 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -199,8 +199,9 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev)
}
info->mtd->owner = THIS_MODULE;
- mtd_device_parse_register(info->mtd, NULL, 0,
- latch_addr_data->parts, latch_addr_data->nr_parts);
+ mtd_device_parse_register(info->mtd, NULL, NULL,
+ latch_addr_data->parts,
+ latch_addr_data->nr_parts);
return 0;
iounmap:
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 0259cf583022..a3cfad392ed6 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -294,13 +294,24 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
}
+static DEFINE_SPINLOCK(pcmcia_vpp_lock);
+static int pcmcia_vpp_refcnt;
static void pcmciamtd_set_vpp(struct map_info *map, int on)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
struct pcmcia_device *link = dev->p_dev;
+ unsigned long flags;
pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
- pcmcia_fixup_vpp(link, on ? dev->vpp : 0);
+ spin_lock_irqsave(&pcmcia_vpp_lock, flags);
+ if (on) {
+ if (++pcmcia_vpp_refcnt == 1) /* first nested 'on' */
+ pcmcia_fixup_vpp(link, dev->vpp);
+ } else {
+ if (--pcmcia_vpp_refcnt == 0) /* last nested 'off' */
+ pcmcia_fixup_vpp(link, 0);
+ }
+ spin_unlock_irqrestore(&pcmcia_vpp_lock, flags);
}
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index abc562653b31..21b0b713cacb 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,6 +27,8 @@ struct physmap_flash_info {
struct mtd_info *mtd[MAX_RESOURCES];
struct mtd_info *cmtd;
struct map_info map[MAX_RESOURCES];
+ spinlock_t vpp_lock;
+ int vpp_refcnt;
};
static int physmap_flash_remove(struct platform_device *dev)
@@ -63,12 +65,26 @@ static void physmap_set_vpp(struct map_info *map, int state)
{
struct platform_device *pdev;
struct physmap_flash_data *physmap_data;
+ struct physmap_flash_info *info;
+ unsigned long flags;
pdev = (struct platform_device *)map->map_priv_1;
physmap_data = pdev->dev.platform_data;
- if (physmap_data->set_vpp)
- physmap_data->set_vpp(pdev, state);
+ if (!physmap_data->set_vpp)
+ return;
+
+ info = platform_get_drvdata(pdev);
+
+ spin_lock_irqsave(&info->vpp_lock, flags);
+ if (state) {
+ if (++info->vpp_refcnt == 1) /* first nested 'on' */
+ physmap_data->set_vpp(pdev, 1);
+ } else {
+ if (--info->vpp_refcnt == 0) /* last nested 'off' */
+ physmap_data->set_vpp(pdev, 0);
+ }
+ spin_unlock_irqrestore(&info->vpp_lock, flags);
}
static const char *rom_probe_types[] = {
@@ -172,9 +188,11 @@ static int physmap_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
+ spin_lock_init(&info->vpp_lock);
+
part_types = physmap_data->part_probe_types ? : part_probe_types;
- mtd_device_parse_register(info->cmtd, part_types, 0,
+ mtd_device_parse_register(info->cmtd, part_types, NULL,
physmap_data->parts, physmap_data->nr_parts);
return 0;
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 45876d0e5b8e..891558de3ec1 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -222,8 +222,9 @@ static int platram_probe(struct platform_device *pdev)
/* check to see if there are any available partitions, or wether
* to add this device whole */
- err = mtd_device_parse_register(info->mtd, pdata->probes, 0,
- pdata->partitions, pdata->nr_partitions);
+ err = mtd_device_parse_register(info->mtd, pdata->probes, NULL,
+ pdata->partitions,
+ pdata->nr_partitions);
if (!err)
dev_info(&pdev->dev, "registered mtd device\n");
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 436d121185b1..81884c277405 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -98,7 +98,8 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
}
info->mtd->owner = THIS_MODULE;
- mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts);
+ mtd_device_parse_register(info->mtd, probes, NULL, flash->parts,
+ flash->nr_parts);
platform_set_drvdata(pdev, info);
return 0;
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 3da63fc6f16e..6f52e1f288b6 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -102,8 +102,8 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
info->mtd->owner = THIS_MODULE;
if (err)
goto err_out;
- err = mtd_device_parse_register(info->mtd, NULL, 0,
- pdata->parts, pdata->nr_parts);
+ err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts,
+ pdata->nr_parts);
if (err)
goto err_out;
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index cbc3b7867910..a675bdbcb0fe 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -36,10 +36,22 @@ struct sa_info {
struct sa_subdev_info subdev[0];
};
+static DEFINE_SPINLOCK(sa1100_vpp_lock);
+static int sa1100_vpp_refcnt;
static void sa1100_set_vpp(struct map_info *map, int on)
{
struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map);
- subdev->plat->set_vpp(on);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sa1100_vpp_lock, flags);
+ if (on) {
+ if (++sa1100_vpp_refcnt == 1) /* first nested 'on' */
+ subdev->plat->set_vpp(1);
+ } else {
+ if (--sa1100_vpp_refcnt == 0) /* last nested 'off' */
+ subdev->plat->set_vpp(0);
+ }
+ spin_unlock_irqrestore(&sa1100_vpp_lock, flags);
}
static void sa1100_destroy_subdev(struct sa_subdev_info *subdev)
@@ -252,8 +264,8 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
/*
* Partition selection stuff.
*/
- mtd_device_parse_register(info->mtd, part_probes, 0,
- plat->parts, plat->nr_parts);
+ mtd_device_parse_register(info->mtd, part_probes, NULL, plat->parts,
+ plat->nr_parts);
platform_set_drvdata(pdev, info);
err = 0;
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 496c40704aff..9d900ada6708 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -92,8 +92,8 @@ static int __init init_soleng_maps(void)
mtd_device_register(eprom_mtd, NULL, 0);
}
- mtd_device_parse_register(flash_mtd, probes, 0,
- superh_se_partitions, NUM_PARTITIONS);
+ mtd_device_parse_register(flash_mtd, probes, NULL,
+ superh_se_partitions, NUM_PARTITIONS);
return 0;
}
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 6793074f3f40..cfff454f628b 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -85,7 +85,7 @@ static int __init uclinux_mtd_init(void)
}
mtd->owner = THIS_MODULE;
- mtd->point = uclinux_point;
+ mtd->_point = uclinux_point;
mtd->priv = mapp;
uclinux_ram_mtdinfo = mtd;
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 3a04b078576a..2e2b0945edc7 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -360,9 +360,6 @@ static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
int index = 0, retval, partition, leftover, numblocks;
unsigned char cx;
- if (len < 1)
- return -EIO;
-
mpart = mtd->priv;
mdev = mpart->mdev;
partition = mpart->partition;
@@ -434,11 +431,6 @@ static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
partition = mpart->partition;
card = maple_get_drvdata(mdev);
- /* simple sanity checks */
- if (len < 1) {
- error = -EIO;
- goto failed;
- }
numblocks = card->parts[partition].numblocks;
if (to + len > numblocks * card->blocklen)
len = numblocks * card->blocklen - to;
@@ -544,9 +536,9 @@ static void vmu_queryblocks(struct mapleq *mq)
mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
mtd_cur->size = part_cur->numblocks * card->blocklen;
mtd_cur->erasesize = card->blocklen;
- mtd_cur->write = vmu_flash_write;
- mtd_cur->read = vmu_flash_read;
- mtd_cur->sync = vmu_flash_sync;
+ mtd_cur->_write = vmu_flash_write;
+ mtd_cur->_read = vmu_flash_read;
+ mtd_cur->_sync = vmu_flash_sync;
mtd_cur->writesize = card->blocklen;
mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index aa7e0cb2893c..71b0ba797912 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -142,7 +142,7 @@ static int __init init_sbc82xx_flash(void)
nr_parts = ARRAY_SIZE(smallflash_parts);
}
- mtd_device_parse_register(sbcmtd[i], part_probes, 0,
+ mtd_device_parse_register(sbcmtd[i], part_probes, NULL,
defparts, nr_parts);
}
return 0;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 424ca5f93c6c..f1f06715d4e0 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -233,6 +233,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
ret = __get_mtd_device(dev->mtd);
if (ret)
goto error_release;
+ dev->file_mode = mode;
unlock:
dev->open++;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index af6591237b9b..6c6d80736fad 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -321,8 +321,12 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
mutex_unlock(&mtdblk->cache_mutex);
if (!--mtdblk->count) {
- /* It was the last usage. Free the cache */
- mtd_sync(mbd->mtd);
+ /*
+ * It was the last usage. Free the cache, but only sync if
+ * opened for writing.
+ */
+ if (mbd->file_mode & FMODE_WRITE)
+ mtd_sync(mbd->mtd);
vfree(mtdblk->cache_data);
}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index c57ae92ebda4..55d82321d307 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -405,7 +405,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
if (length > 4096)
return -EINVAL;
- if (!mtd->write_oob)
+ if (!mtd->_write_oob)
ret = -EOPNOTSUPP;
else
ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
@@ -576,7 +576,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
!access_ok(VERIFY_READ, req.usr_data, req.len) ||
!access_ok(VERIFY_READ, req.usr_oob, req.ooblen))
return -EFAULT;
- if (!mtd->write_oob)
+ if (!mtd->_write_oob)
return -EOPNOTSUPP;
ops.mode = req.mode;
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 1ed5103b219b..b9000563b9f4 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -72,8 +72,6 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret = 0, err;
int i;
- *retlen = 0;
-
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
size_t size, retsize;
@@ -126,11 +124,6 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
int err = -EINVAL;
int i;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
-
- *retlen = 0;
-
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
size_t size, retsize;
@@ -145,11 +138,7 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
else
size = len;
- if (!(subdev->flags & MTD_WRITEABLE))
- err = -EROFS;
- else
- err = mtd_write(subdev, to, size, &retsize, buf);
-
+ err = mtd_write(subdev, to, size, &retsize, buf);
if (err)
break;
@@ -176,19 +165,10 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
int i;
int err = -EINVAL;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
-
- *retlen = 0;
-
/* Calculate total length of data */
for (i = 0; i < count; i++)
total_len += vecs[i].iov_len;
- /* Do not allow write past end of device */
- if ((to + total_len) > mtd->size)
- return -EINVAL;
-
/* Check alignment */
if (mtd->writesize > 1) {
uint64_t __to = to;
@@ -224,12 +204,8 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
old_iov_len = vecs_copy[entry_high].iov_len;
vecs_copy[entry_high].iov_len = size;
- if (!(subdev->flags & MTD_WRITEABLE))
- err = -EROFS;
- else
- err = mtd_writev(subdev, &vecs_copy[entry_low],
- entry_high - entry_low + 1, to,
- &retsize);
+ err = mtd_writev(subdev, &vecs_copy[entry_low],
+ entry_high - entry_low + 1, to, &retsize);
vecs_copy[entry_high].iov_len = old_iov_len - size;
vecs_copy[entry_high].iov_base += size;
@@ -403,15 +379,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
uint64_t length, offset = 0;
struct erase_info *erase;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
-
- if (instr->addr > concat->mtd.size)
- return -EINVAL;
-
- if (instr->len + instr->addr > concat->mtd.size)
- return -EINVAL;
-
/*
* Check for proper erase block alignment of the to-be-erased area.
* It is easier to do this based on the super device's erase
@@ -459,8 +426,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
return -EINVAL;
}
- instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-
/* make a local copy of instr to avoid modifying the caller's struct */
erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
@@ -499,10 +464,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
else
erase->len = length;
- if (!(subdev->flags & MTD_WRITEABLE)) {
- err = -EROFS;
- break;
- }
length -= erase->len;
if ((err = concat_dev_erase(subdev, erase))) {
/* sanity check: should never happen since
@@ -538,9 +499,6 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
- if ((len + ofs) > mtd->size)
- return -EINVAL;
-
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
uint64_t size;
@@ -575,9 +533,6 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_concat *concat = CONCAT(mtd);
int i, err = 0;
- if ((len + ofs) > mtd->size)
- return -EINVAL;
-
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
uint64_t size;
@@ -650,9 +605,6 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
if (!mtd_can_have_bb(concat->subdev[0]))
return res;
- if (ofs > mtd->size)
- return -EINVAL;
-
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
@@ -673,12 +625,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
- if (!mtd_can_have_bb(concat->subdev[0]))
- return 0;
-
- if (ofs > mtd->size)
- return -EINVAL;
-
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
@@ -716,10 +662,6 @@ static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
continue;
}
- /* we've found the subdev over which the mapping will reside */
- if (offset + len > subdev->size)
- return (unsigned long) -EINVAL;
-
return mtd_get_unmapped_area(subdev, len, offset, flags);
}
@@ -777,16 +719,16 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd.subpage_sft = subdev[0]->subpage_sft;
concat->mtd.oobsize = subdev[0]->oobsize;
concat->mtd.oobavail = subdev[0]->oobavail;
- if (subdev[0]->writev)
- concat->mtd.writev = concat_writev;
- if (subdev[0]->read_oob)
- concat->mtd.read_oob = concat_read_oob;
- if (subdev[0]->write_oob)
- concat->mtd.write_oob = concat_write_oob;
- if (subdev[0]->block_isbad)
- concat->mtd.block_isbad = concat_block_isbad;
- if (subdev[0]->block_markbad)
- concat->mtd.block_markbad = concat_block_markbad;
+ if (subdev[0]->_writev)
+ concat->mtd._writev = concat_writev;
+ if (subdev[0]->_read_oob)
+ concat->mtd._read_oob = concat_read_oob;
+ if (subdev[0]->_write_oob)
+ concat->mtd._write_oob = concat_write_oob;
+ if (subdev[0]->_block_isbad)
+ concat->mtd._block_isbad = concat_block_isbad;
+ if (subdev[0]->_block_markbad)
+ concat->mtd._block_markbad = concat_block_markbad;
concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
@@ -833,8 +775,8 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
if (concat->mtd.writesize != subdev[i]->writesize ||
concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
concat->mtd.oobsize != subdev[i]->oobsize ||
- !concat->mtd.read_oob != !subdev[i]->read_oob ||
- !concat->mtd.write_oob != !subdev[i]->write_oob) {
+ !concat->mtd._read_oob != !subdev[i]->_read_oob ||
+ !concat->mtd._write_oob != !subdev[i]->_write_oob) {
kfree(concat);
printk("Incompatible OOB or ECC data on \"%s\"\n",
subdev[i]->name);
@@ -849,15 +791,15 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->num_subdev = num_devs;
concat->mtd.name = name;
- concat->mtd.erase = concat_erase;
- concat->mtd.read = concat_read;
- concat->mtd.write = concat_write;
- concat->mtd.sync = concat_sync;
- concat->mtd.lock = concat_lock;
- concat->mtd.unlock = concat_unlock;
- concat->mtd.suspend = concat_suspend;
- concat->mtd.resume = concat_resume;
- concat->mtd.get_unmapped_area = concat_get_unmapped_area;
+ concat->mtd._erase = concat_erase;
+ concat->mtd._read = concat_read;
+ concat->mtd._write = concat_write;
+ concat->mtd._sync = concat_sync;
+ concat->mtd._lock = concat_lock;
+ concat->mtd._unlock = concat_unlock;
+ concat->mtd._suspend = concat_suspend;
+ concat->mtd._resume = concat_resume;
+ concat->mtd._get_unmapped_area = concat_get_unmapped_area;
/*
* Combine the erase block size info of the subdevices:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9a9ce71a71fc..c837507dfb1c 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -107,7 +107,7 @@ static LIST_HEAD(mtd_notifiers);
*/
static void mtd_release(struct device *dev)
{
- struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
dev_t index = MTD_DEVT(mtd->index);
/* remove /dev/mtdXro node if needed */
@@ -126,7 +126,7 @@ static int mtd_cls_resume(struct device *dev)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- if (mtd && mtd->resume)
+ if (mtd)
mtd_resume(mtd);
return 0;
}
@@ -610,8 +610,8 @@ int __get_mtd_device(struct mtd_info *mtd)
if (!try_module_get(mtd->owner))
return -ENODEV;
- if (mtd->get_device) {
- err = mtd->get_device(mtd);
+ if (mtd->_get_device) {
+ err = mtd->_get_device(mtd);
if (err) {
module_put(mtd->owner);
@@ -675,14 +675,267 @@ void __put_mtd_device(struct mtd_info *mtd)
--mtd->usecount;
BUG_ON(mtd->usecount < 0);
- if (mtd->put_device)
- mtd->put_device(mtd);
+ if (mtd->_put_device)
+ mtd->_put_device(mtd);
module_put(mtd->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
/*
+ * Erase is an asynchronous operation. Device drivers are supposed
+ * to call instr->callback() whenever the operation completes, even
+ * if it completes with a failure.
+ * Callers are supposed to pass a callback function and wait for it
+ * to be called before writing to the block.
+ */
+int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ if (!instr->len) {
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return 0;
+ }
+ return mtd->_erase(mtd, instr);
+}
+EXPORT_SYMBOL_GPL(mtd_erase);
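
For illustration only (not part of this patch): a minimal sketch of how a caller might drive the asynchronous mtd_erase() interface described above, by supplying a callback that wakes a waitqueue and then sleeping until the driver reports MTD_ERASE_DONE or MTD_ERASE_FAILED. The helper names are hypothetical; the erase_info fields used (addr, len, callback, priv, state) are the same ones used throughout this patch, and <linux/mtd/mtd.h> plus <linux/sched.h> are assumed to be included.

static void example_erase_callback(struct erase_info *instr)
{
	/* the driver calls this on completion, success or failure */
	wake_up((wait_queue_head_t *)instr->priv);
}

static int example_erase_sync(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	struct erase_info erase;
	int ret;

	init_waitqueue_head(&wait_q);
	memset(&erase, 0, sizeof(erase));
	erase.mtd = mtd;
	erase.addr = ofs;
	erase.len = len;
	erase.callback = example_erase_callback;
	erase.priv = (u_long)&wait_q;

	/* queue ourselves before issuing the erase so the wakeup is not lost */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = mtd_erase(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		return ret;
	}

	/* sleep until the callback has run */
	schedule();
	remove_wait_queue(&wait_q, &wait);

	return erase.state == MTD_ERASE_FAILED ? -EIO : 0;
}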
+
+/*
+ * These functions are for eXecute-In-Place (XIP). phys is optional and may
+ * be set to NULL.
+ */
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ void **virt, resource_size_t *phys)
+{
+ *retlen = 0;
+ *virt = NULL;
+ if (phys)
+ *phys = 0;
+ if (!mtd->_point)
+ return -EOPNOTSUPP;
+ if (from < 0 || from > mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_point(mtd, from, len, retlen, virt, phys);
+}
+EXPORT_SYMBOL_GPL(mtd_point);
+
+/* We probably shouldn't allow XIP if the unpoint method is NULL */
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ if (!mtd->_point)
+ return -EOPNOTSUPP;
+ if (from < 0 || from > mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_unpoint(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unpoint);
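
A hedged usage sketch (not part of this patch): reading through mtd_point()/mtd_unpoint() when the device can hand out a direct mapping, and falling back to mtd_read() otherwise. The function name is made up for illustration.

static int example_read_mapped(struct mtd_info *mtd, loff_t from, size_t len,
			       u_char *buf)
{
	size_t retlen;
	void *virt;
	int ret;

	/* phys is optional, so pass NULL here */
	ret = mtd_point(mtd, from, len, &retlen, &virt, NULL);
	if (!ret && retlen == len) {
		memcpy(buf, virt, len);		/* direct access to flash */
		return mtd_unpoint(mtd, from, len);
	}
	if (!ret)
		mtd_unpoint(mtd, from, retlen);	/* short mapping, give it back */

	/* -EOPNOTSUPP or short mapping: use the regular read path */
	return mtd_read(mtd, from, len, &retlen, buf);
}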
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+ unsigned long offset, unsigned long flags)
+{
+ if (!mtd->_get_unmapped_area)
+ return -EOPNOTSUPP;
+ if (offset > mtd->size || len > mtd->size - offset)
+ return -EINVAL;
+ return mtd->_get_unmapped_area(mtd, len, offset, flags);
+}
+EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
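
Another illustrative sketch, not taken from the patch: a no-MMU character-device get_unmapped_area hook could simply forward to the helper above. The file->private_data layout is an assumption made for the example.

static unsigned long example_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mtd_info *mtd = file->private_data;	/* assumed layout */
	unsigned long offset = pgoff << PAGE_SHIFT;

	/* returns the mappable address, or an error from the helper above */
	return mtd_get_unmapped_area(mtd, len, offset, flags);
}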
+
+int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ u_char *buf)
+{
+ *retlen = 0;
+ if (from < 0 || from > mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_read(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read);
+
+int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf)
+{
+ *retlen = 0;
+ if (to < 0 || to > mtd->size || len > mtd->size - to)
+ return -EINVAL;
+ if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!len)
+ return 0;
+ return mtd->_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_write);
+
+/*
+ * In blackbox flight-recorder like scenarios we want writes to succeed even
+ * in interrupt context. panic_write() is only intended to be called when it
+ * is known that the kernel is about to panic and we need the write to
+ * succeed. Since the kernel is not going to be running for much longer, this
+ * function can break locks and delay to ensure the write succeeds (but not
+ * sleep).
+ */
+int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_panic_write)
+ return -EOPNOTSUPP;
+ if (to < 0 || to > mtd->size || len > mtd->size - to)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!len)
+ return 0;
+ return mtd->_panic_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_panic_write);
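
A sketch of the flight-recorder pattern the comment above alludes to (hypothetical function, not part of this patch): prefer mtd_panic_write() while the kernel is going down, mtd_write() otherwise, and treat -EOPNOTSUPP as "driver provides no panic_write method".

static void example_write_record(struct mtd_info *mtd, loff_t pos,
				 const u_char *rec, size_t len, bool in_panic)
{
	size_t retlen = 0;
	int ret;

	if (in_panic)
		ret = mtd_panic_write(mtd, pos, len, &retlen, rec);
	else
		ret = mtd_write(mtd, pos, len, &retlen, rec);

	if (ret == -EOPNOTSUPP)
		pr_err("example: device has no panic_write method\n");
	else if (ret || retlen != len)
		pr_err("example: write at 0x%llx failed (%d)\n",
		       (unsigned long long)pos, ret);
}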
+
+/*
+ * Methods to access the protection register area, present in some flash
+ * devices. The user data is one-time programmable but the factory data is
+ * read-only.
+ */
+int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
+ size_t len)
+{
+ if (!mtd->_get_fact_prot_info)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_get_fact_prot_info(mtd, buf, len);
+}
+EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
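
For illustration (hypothetical, not part of this patch): enumerating the factory OTP regions with the helper above. The return value is taken to be the number of bytes placed in the buffer, which is how the character-device ioctl path consumes it; struct otp_info (start, length, locked) comes from mtd-abi.h.

static void example_dump_fact_otp(struct mtd_info *mtd)
{
	struct otp_info info[8];
	int ret, i;

	ret = mtd_get_fact_prot_info(mtd, info, sizeof(info));
	if (ret <= 0)
		return;		/* -EOPNOTSUPP, error, or no regions */

	for (i = 0; i < ret / sizeof(*info); i++)
		pr_info("factory OTP region %d: start %u, len %u, %slocked\n",
			i, info[i].start, info[i].length,
			info[i].locked ? "" : "un");
}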
+
+int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_read_fact_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
+
+int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
+ size_t len)
+{
+ if (!mtd->_get_user_prot_info)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_get_user_prot_info(mtd, buf, len);
+}
+EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
+
+int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_read_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
+
+int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_write_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
+
+int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ if (!mtd->_lock_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_lock_user_prot_reg(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
+
+/* Chip-supported device locking */
+int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->_lock)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_lock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock);
+
+int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->_unlock)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_unlock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unlock);
+
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->_is_locked)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_is_locked(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_is_locked);
+
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ if (!mtd->_block_isbad)
+ return 0;
+ if (ofs < 0 || ofs > mtd->size)
+ return -EINVAL;
+ return mtd->_block_isbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_isbad);
+
+int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ if (!mtd->_block_markbad)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs > mtd->size)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ return mtd->_block_markbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_markbad);
+
+/*
* default_mtd_writev - the default writev method
* @mtd: mtd device description object pointer
* @vecs: the vectors to write
@@ -729,9 +982,11 @@ int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
*retlen = 0;
- if (!mtd->writev)
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!mtd->_writev)
return default_mtd_writev(mtd, vecs, count, to, retlen);
- return mtd->writev(mtd, vecs, count, to, retlen);
+ return mtd->_writev(mtd, vecs, count, to, retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 3ce99e00a49e..ae36d7e1e913 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -169,7 +169,7 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
cxt->nextpage = 0;
}
- while (mtd_can_have_bb(mtd)) {
+ while (1) {
ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
if (!ret)
break;
@@ -199,9 +199,9 @@ badblock:
return;
}
- if (mtd_can_have_bb(mtd) && ret == -EIO) {
+ if (ret == -EIO) {
ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
- if (ret < 0) {
+ if (ret < 0 && ret != -EOPNOTSUPP) {
printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
return;
}
@@ -257,8 +257,7 @@ static void find_next_position(struct mtdoops_context *cxt)
size_t retlen;
for (page = 0; page < cxt->oops_pages; page++) {
- if (mtd_can_have_bb(mtd) &&
- mtd_block_isbad(mtd, page * record_size))
+ if (mtd_block_isbad(mtd, page * record_size))
continue;
/* Assume the page is used */
mark_page_used(cxt, page);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a3d44c3416b4..9651c06de0a9 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -65,12 +65,8 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
int res;
stats = part->master->ecc_stats;
-
- if (from >= mtd->size)
- len = 0;
- else if (from + len > mtd->size)
- len = mtd->size - from;
- res = mtd_read(part->master, from + part->offset, len, retlen, buf);
+ res = part->master->_read(part->master, from + part->offset, len,
+ retlen, buf);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
@@ -84,19 +80,16 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct mtd_part *part = PART(mtd);
- if (from >= mtd->size)
- len = 0;
- else if (from + len > mtd->size)
- len = mtd->size - from;
- return mtd_point(part->master, from + part->offset, len, retlen,
- virt, phys);
+
+ return part->master->_point(part->master, from + part->offset, len,
+ retlen, virt, phys);
}
-static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_part *part = PART(mtd);
- mtd_unpoint(part->master, from + part->offset, len);
+ return part->master->_unpoint(part->master, from + part->offset, len);
}
static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
@@ -107,7 +100,8 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
struct mtd_part *part = PART(mtd);
offset += part->offset;
- return mtd_get_unmapped_area(part->master, len, offset, flags);
+ return part->master->_get_unmapped_area(part->master, len, offset,
+ flags);
}
static int part_read_oob(struct mtd_info *mtd, loff_t from,
@@ -138,7 +132,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- res = mtd_read_oob(part->master, from + part->offset, ops);
+ res = part->master->_read_oob(part->master, from + part->offset, ops);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected++;
@@ -152,55 +146,46 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return mtd_read_user_prot_reg(part->master, from, len, retlen, buf);
+ return part->master->_read_user_prot_reg(part->master, from, len,
+ retlen, buf);
}
static int part_get_user_prot_info(struct mtd_info *mtd,
struct otp_info *buf, size_t len)
{
struct mtd_part *part = PART(mtd);
- return mtd_get_user_prot_info(part->master, buf, len);
+ return part->master->_get_user_prot_info(part->master, buf, len);
}
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf);
+ return part->master->_read_fact_prot_reg(part->master, from, len,
+ retlen, buf);
}
static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
size_t len)
{
struct mtd_part *part = PART(mtd);
- return mtd_get_fact_prot_info(part->master, buf, len);
+ return part->master->_get_fact_prot_info(part->master, buf, len);
}
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- if (to >= mtd->size)
- len = 0;
- else if (to + len > mtd->size)
- len = mtd->size - to;
- return mtd_write(part->master, to + part->offset, len, retlen, buf);
+ return part->master->_write(part->master, to + part->offset, len,
+ retlen, buf);
}
static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- if (to >= mtd->size)
- len = 0;
- else if (to + len > mtd->size)
- len = mtd->size - to;
- return mtd_panic_write(part->master, to + part->offset, len, retlen,
- buf);
+ return part->master->_panic_write(part->master, to + part->offset, len,
+ retlen, buf);
}
static int part_write_oob(struct mtd_info *mtd, loff_t to,
@@ -208,50 +193,43 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
{
struct mtd_part *part = PART(mtd);
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
-
if (to >= mtd->size)
return -EINVAL;
if (ops->datbuf && to + ops->len > mtd->size)
return -EINVAL;
- return mtd_write_oob(part->master, to + part->offset, ops);
+ return part->master->_write_oob(part->master, to + part->offset, ops);
}
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return mtd_write_user_prot_reg(part->master, from, len, retlen, buf);
+ return part->master->_write_user_prot_reg(part->master, from, len,
+ retlen, buf);
}
static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct mtd_part *part = PART(mtd);
- return mtd_lock_user_prot_reg(part->master, from, len);
+ return part->master->_lock_user_prot_reg(part->master, from, len);
}
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
struct mtd_part *part = PART(mtd);
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- return mtd_writev(part->master, vecs, count, to + part->offset,
- retlen);
+ return part->master->_writev(part->master, vecs, count,
+ to + part->offset, retlen);
}
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mtd_part *part = PART(mtd);
int ret;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- if (instr->addr >= mtd->size)
- return -EINVAL;
+
instr->addr += part->offset;
- ret = mtd_erase(part->master, instr);
+ ret = part->master->_erase(part->master, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
@@ -262,7 +240,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
void mtd_erase_callback(struct erase_info *instr)
{
- if (instr->mtd->erase == part_erase) {
+ if (instr->mtd->_erase == part_erase) {
struct mtd_part *part = PART(instr->mtd);
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
@@ -277,52 +255,44 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
- if ((len + ofs) > mtd->size)
- return -EINVAL;
- return mtd_lock(part->master, ofs + part->offset, len);
+ return part->master->_lock(part->master, ofs + part->offset, len);
}
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
- if ((len + ofs) > mtd->size)
- return -EINVAL;
- return mtd_unlock(part->master, ofs + part->offset, len);
+ return part->master->_unlock(part->master, ofs + part->offset, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
- if ((len + ofs) > mtd->size)
- return -EINVAL;
- return mtd_is_locked(part->master, ofs + part->offset, len);
+ return part->master->_is_locked(part->master, ofs + part->offset, len);
}
static void part_sync(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- mtd_sync(part->master);
+ part->master->_sync(part->master);
}
static int part_suspend(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- return mtd_suspend(part->master);
+ return part->master->_suspend(part->master);
}
static void part_resume(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- mtd_resume(part->master);
+ part->master->_resume(part->master);
}
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
- if (ofs >= mtd->size)
- return -EINVAL;
ofs += part->offset;
- return mtd_block_isbad(part->master, ofs);
+ return part->master->_block_isbad(part->master, ofs);
}
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -330,12 +300,8 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_part *part = PART(mtd);
int res;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- if (ofs >= mtd->size)
- return -EINVAL;
ofs += part->offset;
- res = mtd_block_markbad(part->master, ofs);
+ res = part->master->_block_markbad(part->master, ofs);
if (!res)
mtd->ecc_stats.badblocks++;
return res;
@@ -410,54 +376,55 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
*/
slave->mtd.dev.parent = master->dev.parent;
- slave->mtd.read = part_read;
- slave->mtd.write = part_write;
+ slave->mtd._read = part_read;
+ slave->mtd._write = part_write;
- if (master->panic_write)
- slave->mtd.panic_write = part_panic_write;
+ if (master->_panic_write)
+ slave->mtd._panic_write = part_panic_write;
- if (master->point && master->unpoint) {
- slave->mtd.point = part_point;
- slave->mtd.unpoint = part_unpoint;
+ if (master->_point && master->_unpoint) {
+ slave->mtd._point = part_point;
+ slave->mtd._unpoint = part_unpoint;
}
- if (master->get_unmapped_area)
- slave->mtd.get_unmapped_area = part_get_unmapped_area;
- if (master->read_oob)
- slave->mtd.read_oob = part_read_oob;
- if (master->write_oob)
- slave->mtd.write_oob = part_write_oob;
- if (master->read_user_prot_reg)
- slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
- if (master->read_fact_prot_reg)
- slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
- if (master->write_user_prot_reg)
- slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
- if (master->lock_user_prot_reg)
- slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
- if (master->get_user_prot_info)
- slave->mtd.get_user_prot_info = part_get_user_prot_info;
- if (master->get_fact_prot_info)
- slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
- if (master->sync)
- slave->mtd.sync = part_sync;
- if (!partno && !master->dev.class && master->suspend && master->resume) {
- slave->mtd.suspend = part_suspend;
- slave->mtd.resume = part_resume;
+ if (master->_get_unmapped_area)
+ slave->mtd._get_unmapped_area = part_get_unmapped_area;
+ if (master->_read_oob)
+ slave->mtd._read_oob = part_read_oob;
+ if (master->_write_oob)
+ slave->mtd._write_oob = part_write_oob;
+ if (master->_read_user_prot_reg)
+ slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
+ if (master->_read_fact_prot_reg)
+ slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
+ if (master->_write_user_prot_reg)
+ slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
+ if (master->_lock_user_prot_reg)
+ slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
+ if (master->_get_user_prot_info)
+ slave->mtd._get_user_prot_info = part_get_user_prot_info;
+ if (master->_get_fact_prot_info)
+ slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
+ if (master->_sync)
+ slave->mtd._sync = part_sync;
+ if (!partno && !master->dev.class && master->_suspend &&
+ master->_resume) {
+ slave->mtd._suspend = part_suspend;
+ slave->mtd._resume = part_resume;
}
- if (master->writev)
- slave->mtd.writev = part_writev;
- if (master->lock)
- slave->mtd.lock = part_lock;
- if (master->unlock)
- slave->mtd.unlock = part_unlock;
- if (master->is_locked)
- slave->mtd.is_locked = part_is_locked;
- if (master->block_isbad)
- slave->mtd.block_isbad = part_block_isbad;
- if (master->block_markbad)
- slave->mtd.block_markbad = part_block_markbad;
- slave->mtd.erase = part_erase;
+ if (master->_writev)
+ slave->mtd._writev = part_writev;
+ if (master->_lock)
+ slave->mtd._lock = part_lock;
+ if (master->_unlock)
+ slave->mtd._unlock = part_unlock;
+ if (master->_is_locked)
+ slave->mtd._is_locked = part_is_locked;
+ if (master->_block_isbad)
+ slave->mtd._block_isbad = part_block_isbad;
+ if (master->_block_markbad)
+ slave->mtd._block_markbad = part_block_markbad;
+ slave->mtd._erase = part_erase;
slave->master = master;
slave->offset = part->offset;
@@ -549,7 +516,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
}
slave->mtd.ecclayout = master->ecclayout;
- if (master->block_isbad) {
+ slave->mtd.ecc_strength = master->ecc_strength;
+ if (master->_block_isbad) {
uint64_t offs = 0;
while (offs < slave->mtd.size) {
@@ -761,7 +729,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
for ( ; ret <= 0 && *types; types++) {
parser = get_partition_parser(*types);
if (!parser && !request_module("%s", *types))
- parser = get_partition_parser(*types);
+ parser = get_partition_parser(*types);
if (!parser)
continue;
ret = (*parser->parse_fn)(master, pparts, data);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index a3c4de551ebe..7d17cecad69d 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -314,6 +314,26 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
load time (assuming you build diskonchip as a module) with the module
parameter "inftl_bbt_write=1".
+config MTD_NAND_DOCG4
+ tristate "Support for DiskOnChip G4 (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ select BCH
+ select BITREVERSE
+ help
+ Support for DiskOnChip G4 NAND flash, found in various smartphones and
+ PDAs, among them the Palm Treo680, HTC Prophet and Wizard, Toshiba
+ Portege G900, Asus P526, and O2 XDA Zinc.
+
+ With this driver you will be able to use UBI and create a ubifs on the
+ device, so you may wish to consider enabling UBI and UBIFS as well.
+
+	  These devices ship with the M-Sys/SanDisk SAFTL formatting, for which
+	  there is currently no mtd parser, so you may want to use command-line
+	  partitioning to segregate write-protected blocks. On the Treo680, the
+	  first five erase blocks (256KiB each) are write-protected, followed
+	  by the block containing the SAFTL partition table.  This is probably
+	  typical.
+
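
As a purely illustrative example of the command-line partitioning suggested above (the partition names and the assumption that a single block holds the SAFTL table are mine, not taken from the driver), a Treo680 boot argument might look like:

    mtdparts=Msys_Diskonchip_G4:1536k(saftl_protected)ro,-(filesystem)

where 1536k covers the five write-protected 256KiB blocks plus the block holding the SAFTL partition table, and the mtd-id matches the name the driver registers ("Msys_Diskonchip_G4").
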
config MTD_NAND_SHARPSL
tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
depends on ARCH_PXA
@@ -421,7 +441,6 @@ config MTD_NAND_NANDSIM
config MTD_NAND_GPMI_NAND
bool "GPMI NAND Flash Controller driver"
depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
- select MTD_CMDLINE_PARTS
help
Enables NAND Flash support for IMX23 or IMX28.
The GPMI controller is very powerful, with the help of BCH
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 19bc8cb1d187..d4b4d8739bd8 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
+obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o
obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 6a5ff64a139e..4f20e1d8bef1 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -585,12 +585,13 @@ static int alauda_init_media(struct alauda *al)
mtd->writesize = 1<<card->pageshift;
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->read = alauda_read;
- mtd->write = alauda_write;
- mtd->erase = alauda_erase;
- mtd->block_isbad = alauda_isbad;
+ mtd->_read = alauda_read;
+ mtd->_write = alauda_write;
+ mtd->_erase = alauda_erase;
+ mtd->_block_isbad = alauda_isbad;
mtd->priv = al;
mtd->owner = THIS_MODULE;
+ mtd->ecc_strength = 1;
err = mtd_device_register(mtd, NULL, 0);
if (err) {
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index ae7e37d9ac17..2165576a1c67 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -603,6 +603,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
nand_chip->ecc.hwctl = atmel_nand_hwctl;
nand_chip->ecc.read_page = atmel_nand_read_page;
nand_chip->ecc.bytes = 4;
+ nand_chip->ecc.strength = 1;
}
nand_chip->chip_delay = 20; /* 20us command delay time */
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 64c9cbaf86a1..6908cdde3065 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -475,6 +475,14 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
this->badblock_pattern = &largepage_bbt;
}
+
+ /*
+ * FIXME: ecc strength value of 6 bits per 512 bytes of data is a
+ * conservative guess, given 13 ecc bytes and using bch alg.
+ * (Assume Galois field order m=15 to allow a margin of error.)
+ */
+ this->ecc.strength = 6;
+
#endif
/* Now finish off the scan, now that ecc.layout has been initialized. */
@@ -487,7 +495,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
/* Register the partitions */
board_mtd->name = "bcm_umi-nand";
- mtd_device_parse_register(board_mtd, NULL, 0, NULL, 0);
+ mtd_device_parse_register(board_mtd, NULL, NULL, NULL, 0);
/* Return happy */
return 0;
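
The 0 -> NULL change here (and in several hunks below) is cosmetic: the third argument of mtd_device_parse_register() is a struct mtd_part_parser_data pointer, so a literal 0 worked but NULL is the idiomatic spelling. The prototype, quoted from memory and therefore approximate:

    int mtd_device_parse_register(struct mtd_info *mtd, const char **types,
    			      struct mtd_part_parser_data *parser_data,
    			      const struct mtd_partition *parts,
    			      int nr_parts);
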
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index dd899cb5d366..d7b86b925de5 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -702,9 +702,11 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
if (likely(mtd->writesize >= 512)) {
chip->ecc.size = 512;
chip->ecc.bytes = 6;
+ chip->ecc.strength = 2;
} else {
chip->ecc.size = 256;
chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET));
SSYNC();
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 72d3f23490c5..2a96e1a12062 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -783,6 +783,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
cafe->nand.ecc.size = mtd->writesize;
cafe->nand.ecc.bytes = 14;
+ cafe->nand.ecc.strength = 4;
cafe->nand.ecc.hwctl = (void *)cafe_nand_bug;
cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
cafe->nand.ecc.correct = (void *)cafe_nand_bug;
@@ -799,7 +800,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, mtd);
mtd->name = "cafe_nand";
- mtd_device_parse_register(mtd, part_probes, 0, NULL, 0);
+ mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
goto out;
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 737ef9a04fdb..1024bfc05c86 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -219,7 +219,7 @@ static int __init cmx270_init(void)
}
/* Register the partitions */
- ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, 0,
+ ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL,
partition_info, NUM_PARTITIONS);
if (ret)
goto err_scan;
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 414afa793563..821c34c62500 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -248,6 +248,8 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
goto out_ior;
}
+ this->ecc.strength = 1;
+
new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
cs553x_mtd[cs] = new_mtd;
@@ -313,7 +315,7 @@ static int __init cs553x_init(void)
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
if (cs553x_mtd[i]) {
/* If any devices registered, return success. Else the last error. */
- mtd_device_parse_register(cs553x_mtd[i], NULL, 0,
+ mtd_device_parse_register(cs553x_mtd[i], NULL, NULL,
NULL, 0);
err = 0;
}
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 6e566156956f..d94b03c207af 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -641,6 +641,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.ecc.bytes = 3;
}
info->chip.ecc.size = 512;
+ info->chip.ecc.strength = pdata->ecc_bits;
break;
default:
ret = -EINVAL;
@@ -752,8 +753,8 @@ syndrome_done:
if (ret < 0)
goto err_scan;
- ret = mtd_device_parse_register(&info->mtd, NULL, 0,
- pdata->parts, pdata->nr_parts);
+ ret = mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
+ pdata->nr_parts);
if (ret < 0)
goto err_scan;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 3984d488f9ab..a9e57d686297 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1590,6 +1590,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
ECC_15BITS * (denali->mtd.writesize /
ECC_SECTOR_SIZE)))) {
/* if MLC OOB size is large enough, use 15bit ECC*/
+ denali->nand.ecc.strength = 15;
denali->nand.ecc.layout = &nand_15bit_oob;
denali->nand.ecc.bytes = ECC_15BITS;
iowrite32(15, denali->flash_reg + ECC_CORRECTION);
@@ -1600,12 +1601,14 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
" contain 8bit ECC correction codes");
goto failed_req_irq;
} else {
+ denali->nand.ecc.strength = 8;
denali->nand.ecc.layout = &nand_8bit_oob;
denali->nand.ecc.bytes = ECC_8BITS;
iowrite32(8, denali->flash_reg + ECC_CORRECTION);
}
denali->nand.ecc.bytes *= denali->devnum;
+ denali->nand.ecc.strength *= denali->devnum;
denali->nand.ecc.layout->eccbytes *=
denali->mtd.writesize / ECC_SECTOR_SIZE;
denali->nand.ecc.layout->oobfree[0].offset =
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index df921e7a496c..e2ca067631cf 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1653,6 +1653,7 @@ static int __init doc_probe(unsigned long physadr)
nand->ecc.mode = NAND_ECC_HW_SYNDROME;
nand->ecc.size = 512;
nand->ecc.bytes = 6;
+ nand->ecc.strength = 2;
nand->bbt_options = NAND_BBT_USE_FLASH;
doc->physadr = physadr;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
new file mode 100644
index 000000000000..b08202664543
--- /dev/null
+++ b/drivers/mtd/nand/docg4.c
@@ -0,0 +1,1377 @@
+/*
+ * Copyright © 2012 Mike Dunn <mikedunn@newsguy.com>
+ *
+ * mtd nand driver for M-Systems DiskOnChip G4
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Tested on the Palm Treo 680. The G4 is also present on Toshiba Portege, Asus
+ * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others.
+ * Should work on these as well. Let me know!
+ *
+ * TODO:
+ *
+ * Mechanism for management of password-protected areas
+ *
+ * Hamming ecc when reading oob only
+ *
+ * According to the M-Sys documentation, this device is also available in a
+ * "dual-die" configuration having a 256MB capacity, but no mechanism for
+ * detecting this variant is documented. Currently this driver assumes 128MB
+ * capacity.
+ *
+ * Support for multiple cascaded devices ("floors"). Not sure which gadgets
+ * contain multiple G4s in a cascaded configuration, if any.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/bch.h>
+#include <linux/bitrev.h>
+
+/*
+ * You'll want to ignore badblocks if you're reading a partition that contains
+ * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
+ * it does not use mtd nand's method for marking bad blocks (using oob area).
+ * This will also skip the check of the "page written" flag.
+ */
+static bool ignore_badblocks;
+module_param(ignore_badblocks, bool, 0);
+MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed");
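
For example, assuming the driver is built as the docg4 module, the TrueFFS case described in the comment above would be handled by loading it with:

    modprobe docg4 ignore_badblocks=1
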
+
+struct docg4_priv {
+ struct mtd_info *mtd;
+ struct device *dev;
+ void __iomem *virtadr;
+ int status;
+ struct {
+ unsigned int command;
+ int column;
+ int page;
+ } last_command;
+ uint8_t oob_buf[16];
+ uint8_t ecc_buf[7];
+ int oob_page;
+ struct bch_control *bch;
+};
+
+/*
+ * Defines prefixed with DOCG4 are unique to the diskonchip G4. All others are
+ * shared with other diskonchip devices (P3, G3 at least).
+ *
+ * Functions with names prefixed with docg4_ are mtd / nand interface functions
+ * (though they may also be called internally). All others are internal.
+ */
+
+#define DOC_IOSPACE_DATA 0x0800
+
+/* register offsets */
+#define DOC_CHIPID 0x1000
+#define DOC_DEVICESELECT 0x100a
+#define DOC_ASICMODE 0x100c
+#define DOC_DATAEND 0x101e
+#define DOC_NOP 0x103e
+
+#define DOC_FLASHSEQUENCE 0x1032
+#define DOC_FLASHCOMMAND 0x1034
+#define DOC_FLASHADDRESS 0x1036
+#define DOC_FLASHCONTROL 0x1038
+#define DOC_ECCCONF0 0x1040
+#define DOC_ECCCONF1 0x1042
+#define DOC_HAMMINGPARITY 0x1046
+#define DOC_BCH_SYNDROM(idx) (0x1048 + idx)
+
+#define DOC_ASICMODECONFIRM 0x1072
+#define DOC_CHIPID_INV 0x1074
+#define DOC_POWERMODE 0x107c
+
+#define DOCG4_MYSTERY_REG 0x1050
+
+/* apparently used only to write oob bytes 6 and 7 */
+#define DOCG4_OOB_6_7 0x1052
+
+/* DOC_FLASHSEQUENCE register commands */
+#define DOC_SEQ_RESET 0x00
+#define DOCG4_SEQ_PAGE_READ 0x03
+#define DOCG4_SEQ_FLUSH 0x29
+#define DOCG4_SEQ_PAGEWRITE 0x16
+#define DOCG4_SEQ_PAGEPROG 0x1e
+#define DOCG4_SEQ_BLOCKERASE 0x24
+
+/* DOC_FLASHCOMMAND register commands */
+#define DOCG4_CMD_PAGE_READ 0x00
+#define DOC_CMD_ERASECYCLE2 0xd0
+#define DOCG4_CMD_FLUSH 0x70
+#define DOCG4_CMD_READ2 0x30
+#define DOC_CMD_PROG_BLOCK_ADDR 0x60
+#define DOCG4_CMD_PAGEWRITE 0x80
+#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOC_CMD_RESET 0xff
+
+/* DOC_POWERMODE register bits */
+#define DOC_POWERDOWN_READY 0x80
+
+/* DOC_FLASHCONTROL register bits */
+#define DOC_CTRL_CE 0x10
+#define DOC_CTRL_UNKNOWN 0x40
+#define DOC_CTRL_FLASHREADY 0x01
+
+/* DOC_ECCCONF0 register bits */
+#define DOC_ECCCONF0_READ_MODE 0x8000
+#define DOC_ECCCONF0_UNKNOWN 0x2000
+#define DOC_ECCCONF0_ECC_ENABLE 0x1000
+#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
+
+/* DOC_ECCCONF1 register bits */
+#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
+#define DOC_ECCCONF1_ECC_ENABLE 0x07
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
+
+/* DOC_ASICMODE register bits */
+#define DOC_ASICMODE_RESET 0x00
+#define DOC_ASICMODE_NORMAL 0x01
+#define DOC_ASICMODE_POWERDOWN 0x02
+#define DOC_ASICMODE_MDWREN 0x04
+#define DOC_ASICMODE_BDETCT_RESET 0x08
+#define DOC_ASICMODE_RSTIN_RESET 0x10
+#define DOC_ASICMODE_RAM_WE 0x20
+
+/* good status values read after read/write/erase operations */
+#define DOCG4_PROGSTATUS_GOOD 0x51
+#define DOCG4_PROGSTATUS_GOOD_2 0xe0
+
+/*
+ * On read operations (page and oob-only), the first byte read from I/O reg is a
+ * status. On error, it reads 0x73; otherwise, it reads either 0x71 (first read
+ * after reset only) or 0x51, so bit 1 is presumed to be an error indicator.
+ */
+#define DOCG4_READ_ERROR 0x02 /* bit 1 indicates read error */
+
+/* anatomy of the device */
+#define DOCG4_CHIP_SIZE 0x8000000
+#define DOCG4_PAGE_SIZE 0x200
+#define DOCG4_PAGES_PER_BLOCK 0x200
+#define DOCG4_BLOCK_SIZE (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE)
+#define DOCG4_NUMBLOCKS (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE)
+#define DOCG4_OOB_SIZE 0x10
+#define DOCG4_CHIP_SHIFT 27 /* log_2(DOCG4_CHIP_SIZE) */
+#define DOCG4_PAGE_SHIFT 9 /* log_2(DOCG4_PAGE_SIZE) */
+#define DOCG4_ERASE_SHIFT 18 /* log_2(DOCG4_BLOCK_SIZE) */
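
A quick arithmetic check of the geometry constants above (derived from the defines themselves, not from a datasheet):

    /* DOCG4_BLOCK_SIZE = 0x200 pages * 0x200 bytes = 0x40000 (256 KiB)
     * DOCG4_NUMBLOCKS  = 0x8000000 / 0x40000      = 0x200  (512 blocks)
     * and 2^9 = 0x200, 2^18 = 0x40000, 2^27 = 0x8000000, matching the shifts.
     */
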
+
+/* all but the last byte is included in ecc calculation */
+#define DOCG4_BCH_SIZE (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1)
+
+#define DOCG4_USERDATA_LEN 520 /* 512 byte page plus 8 oob avail to user */
+
+/* expected values from the ID registers */
+#define DOCG4_IDREG1_VALUE 0x0400
+#define DOCG4_IDREG2_VALUE 0xfbff
+
+/* primitive polynomial used to build the Galois field used by hw ecc gen */
+#define DOCG4_PRIMITIVE_POLY 0x4443
+
+#define DOCG4_M 14 /* Galois field is of order 2^14 */
+#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
+
+#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
+
+/*
+ * Oob bytes 0 - 6 are available to the user.
+ * Byte 7 is hamming ecc for first 7 bytes. Bytes 8 - 14 are hw-generated ecc.
+ * Byte 15 (the last) is used by the driver as a "page written" flag.
+ */
+static struct nand_ecclayout docg4_oobinfo = {
+ .eccbytes = 9,
+ .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
+ .oobavail = 7,
+ .oobfree = { {0, 7} }
+};
+
+/*
+ * The device has a nop register which M-Sys claims is for the purpose of
+ * inserting precise delays. But beware; at least some operations fail if the
+ * nop writes are replaced with a generic delay!
+ */
+static inline void write_nop(void __iomem *docptr)
+{
+ writew(0, docptr + DOC_NOP);
+}
+
+static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *nand = mtd->priv;
+ uint16_t *p = (uint16_t *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ p[i] = readw(nand->IO_ADDR_R);
+}
+
+static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *nand = mtd->priv;
+ uint16_t *p = (uint16_t *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ writew(p[i], nand->IO_ADDR_W);
+}
+
+static int poll_status(struct docg4_priv *doc)
+{
+ /*
+ * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL
+ * register. Operations known to take a long time (e.g., block erase)
+ * should sleep for a while before calling this.
+ */
+
+ uint16_t flash_status;
+ unsigned int timeo;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ /* hardware quirk requires reading twice initially */
+ flash_status = readw(docptr + DOC_FLASHCONTROL);
+
+ timeo = 1000;
+ do {
+ cpu_relax();
+ flash_status = readb(docptr + DOC_FLASHCONTROL);
+ } while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo);
+
+
+ if (!timeo) {
+ dev_err(doc->dev, "%s: timed out!\n", __func__);
+ return NAND_STATUS_FAIL;
+ }
+
+ if (unlikely(timeo < 50))
+ dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n",
+ __func__, timeo);
+
+ return 0;
+}
+
+
+static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand)
+{
+
+ struct docg4_priv *doc = nand->priv;
+ int status = NAND_STATUS_WP; /* inverse logic?? */
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ /* report any previously unreported error */
+ if (doc->status) {
+ status |= doc->status;
+ doc->status = 0;
+ return status;
+ }
+
+ status |= poll_status(doc);
+ return status;
+}
+
+static void docg4_select_chip(struct mtd_info *mtd, int chip)
+{
+ /*
+ * Select among multiple cascaded chips ("floors"). Multiple floors are
+ * not yet supported, so the only valid non-negative value is 0.
+ */
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip);
+
+ if (chip < 0)
+ return; /* deselected */
+
+ if (chip > 0)
+ dev_warn(doc->dev, "multiple floors currently unsupported\n");
+
+ writew(0, docptr + DOC_DEVICESELECT);
+}
+
+static void reset(struct mtd_info *mtd)
+{
+ /* full device reset */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN,
+ docptr + DOC_ASICMODE);
+ writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN),
+ docptr + DOC_ASICMODECONFIRM);
+ write_nop(docptr);
+
+ writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN,
+ docptr + DOC_ASICMODE);
+ writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN),
+ docptr + DOC_ASICMODECONFIRM);
+
+ writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1);
+
+ poll_status(doc);
+}
+
+static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf)
+{
+ /* read the 7 hw-generated ecc bytes */
+
+ int i;
+ for (i = 0; i < 7; i++) { /* hw quirk; read twice */
+ ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
+ ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
+ }
+}
+
+static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ /*
+ * Called after a page read when hardware reports bitflips.
+ * Up to four bitflips can be corrected.
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i, numerrs, errpos[4];
+ const uint8_t blank_read_hwecc[8] = {
+ 0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 };
+
+ read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */
+
+ /* check if read error is due to a blank page */
+ if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7))
+ return 0; /* yes */
+
+ /* skip additional check of "written flag" if ignore_badblocks */
+ if (ignore_badblocks == false) {
+
+ /*
+ * If the hw ecc bytes are not those of a blank page, there's
+ * still a chance that the page is blank, but was read with
+ * errors. Check the "written flag" in last oob byte, which
+ * is set to zero when a page is written. If more than half
+ * the bits are set, assume a blank page. Unfortunately, the
+		 * bitflip(s) are not reported in stats.
+ */
+
+ if (doc->oob_buf[15]) {
+ int bit, numsetbits = 0;
+ unsigned long written_flag = doc->oob_buf[15];
+ for_each_set_bit(bit, &written_flag, 8)
+ numsetbits++;
+ if (numsetbits > 4) { /* assume blank */
+ dev_warn(doc->dev,
+ "error(s) in blank page "
+ "at offset %08x\n",
+ page * DOCG4_PAGE_SIZE);
+ return 0;
+ }
+ }
+ }
+
+ /*
+ * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
+ * algorithm is used to decode this. However the hw operates on page
+ * data in a bit order that is the reverse of that of the bch alg,
+ * requiring that the bits be reversed on the result. Thanks to Ivan
+ * Djelic for his analysis!
+ */
+ for (i = 0; i < 7; i++)
+ doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]);
+
+ numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL,
+ doc->ecc_buf, NULL, errpos);
+
+ if (numerrs == -EBADMSG) {
+ dev_warn(doc->dev, "uncorrectable errors at offset %08x\n",
+ page * DOCG4_PAGE_SIZE);
+ return -EBADMSG;
+ }
+
+ BUG_ON(numerrs < 0); /* -EINVAL, or anything other than -EBADMSG */
+
+ /* undo last step in BCH alg (modulo mirroring not needed) */
+ for (i = 0; i < numerrs; i++)
+ errpos[i] = (errpos[i] & ~7)|(7-(errpos[i] & 7));
+
+ /* fix the errors */
+ for (i = 0; i < numerrs; i++) {
+
+ /* ignore if error within oob ecc bytes */
+ if (errpos[i] > DOCG4_USERDATA_LEN * 8)
+ continue;
+
+		/* if error within oob area preceding ecc bytes... */
+ if (errpos[i] > DOCG4_PAGE_SIZE * 8)
+ change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
+ (unsigned long *)doc->oob_buf);
+
+ else /* error in page data */
+ change_bit(errpos[i], (unsigned long *)buf);
+ }
+
+ dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n",
+ numerrs, page * DOCG4_PAGE_SIZE);
+
+ return numerrs;
+}
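
A minimal worked example of the error-position remapping performed above (the value is hypothetical, chosen only to show the byte-internal bit mirroring):

    /* errpos = 13: byte 1, bit 5 in the bch algorithm's bit order
     * (13 & ~7) | (7 - (13 & 7)) = 8 | 2 = 10: byte 1, bit 2 in data order
     */
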
+
+static uint8_t docg4_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+
+ dev_dbg(doc->dev, "%s\n", __func__);
+
+ if (doc->last_command.command == NAND_CMD_STATUS) {
+ int status;
+
+ /*
+ * Previous nand command was status request, so nand
+ * infrastructure code expects to read the status here. If an
+ * error occurred in a previous operation, report it.
+ */
+ doc->last_command.command = 0;
+
+ if (doc->status) {
+ status = doc->status;
+ doc->status = 0;
+ }
+
+ /* why is NAND_STATUS_WP inverse logic?? */
+ else
+ status = NAND_STATUS_WP | NAND_STATUS_READY;
+
+ return status;
+ }
+
+	dev_warn(doc->dev, "unexpected call to read_byte()\n");
+
+ return 0;
+}
+
+static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr)
+{
+ /* write the four address bytes packed in docg4_addr to the device */
+
+ void __iomem *docptr = doc->virtadr;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+ docg4_addr >>= 8;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+ docg4_addr >>= 8;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+ docg4_addr >>= 8;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+}
+
+static int read_progstatus(struct docg4_priv *doc)
+{
+ /*
+ * This apparently checks the status of programming. Done after an
+ * erasure, and after page data is written. On error, the status is
+ * saved, to be later retrieved by the nand infrastructure code.
+ */
+ void __iomem *docptr = doc->virtadr;
+
+ /* status is read from the I/O reg */
+ uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA);
+ uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA);
+ uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG);
+
+ dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n",
+ __func__, status1, status2, status3);
+
+ if (status1 != DOCG4_PROGSTATUS_GOOD
+ || status2 != DOCG4_PROGSTATUS_GOOD_2
+ || status3 != DOCG4_PROGSTATUS_GOOD_2) {
+ doc->status = NAND_STATUS_FAIL;
+ dev_warn(doc->dev, "read_progstatus failed: "
+ "%02x, %02x, %02x\n", status1, status2, status3);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int pageprog(struct mtd_info *mtd)
+{
+ /*
+ * Final step in writing a page. Writes the contents of its
+ * internal buffer out to the flash array, or some such.
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ int retval = 0;
+
+ dev_dbg(doc->dev, "docg4: %s\n", __func__);
+
+ writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE);
+ writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* Just busy-wait; usleep_range() slows things down noticeably. */
+ poll_status(doc);
+
+ writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ retval = read_progstatus(doc);
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+ poll_status(doc);
+ write_nop(docptr);
+
+ return retval;
+}
+
+static void sequence_reset(struct mtd_info *mtd)
+{
+ /* common starting sequence for all operations */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL);
+ writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE);
+ writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+ poll_status(doc);
+ write_nop(docptr);
+}
+
+static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
+{
+ /* first step in reading a page */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev,
+ "docg4: %s: g4 page %08x\n", __func__, docg4_addr);
+
+ sequence_reset(mtd);
+
+ writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+
+ write_addr(doc, docg4_addr);
+
+ write_nop(docptr);
+ writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ poll_status(doc);
+}
+
+static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
+{
+ /* first step in writing a page */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev,
+ "docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
+ sequence_reset(mtd);
+ writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_addr(doc, docg4_addr);
+ write_nop(docptr);
+ write_nop(docptr);
+ poll_status(doc);
+}
+
+static uint32_t mtd_to_docg4_address(int page, int column)
+{
+ /*
+ * Convert mtd address to format used by the device, 32 bit packed.
+ *
+ * Some notes on G4 addressing... The M-Sys documentation on this device
+ * claims that pages are 2K in length, and indeed, the format of the
+ * address used by the device reflects that. But within each page are
+ * four 512 byte "sub-pages", each with its own oob data that is
+ * read/written immediately after the 512 bytes of page data. This oob
+	 * data contains the ecc bytes for the preceding 512 bytes.
+ *
+ * Rather than tell the mtd nand infrastructure that page size is 2k,
+ * with four sub-pages each, we engage in a little subterfuge and tell
+ * the infrastructure code that pages are 512 bytes in size. This is
+ * done because during the course of reverse-engineering the device, I
+ * never observed an instance where an entire 2K "page" was read or
+ * written as a unit. Each "sub-page" is always addressed individually,
+ * its data read/written, and ecc handled before the next "sub-page" is
+ * addressed.
+ *
+ * This requires us to convert addresses passed by the mtd nand
+ * infrastructure code to those used by the device.
+ *
+ * The address that is written to the device consists of four bytes: the
+	 * first two are the 2k page number, and the second two are the index
+	 * into the page.  The index is in terms of 16-bit half-words and
+	 * includes the preceding oob data, so e.g., the index into the second
+ * "sub-page" is 0x108, and the full device address of the start of mtd
+ * page 0x201 is 0x00800108.
+ */
+ int g4_page = page / 4; /* device's 2K page */
+ int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */
+ return (g4_page << 16) | g4_index; /* pack */
+}
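
To make the packing concrete, here is the arithmetic for the example quoted in the comment above (mtd page 0x201, column 0):

    /* g4_page  = 0x201 / 4                 = 0x80
     * g4_index = (0x201 % 4) * 0x108 + 0/2 = 0x108
     * packed   = (0x80 << 16) | 0x108      = 0x00800108
     */
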
+
+static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
+ int page_addr)
+{
+ /* handle standard nand commands */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ uint32_t g4_addr = mtd_to_docg4_address(page_addr, column);
+
+ dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n",
+ __func__, command, page_addr, column);
+
+ /*
+ * Save the command and its arguments. This enables emulation of
+ * standard flash devices, and also some optimizations.
+ */
+ doc->last_command.command = command;
+ doc->last_command.column = column;
+ doc->last_command.page = page_addr;
+
+ switch (command) {
+
+ case NAND_CMD_RESET:
+ reset(mtd);
+ break;
+
+ case NAND_CMD_READ0:
+ read_page_prologue(mtd, g4_addr);
+ break;
+
+ case NAND_CMD_STATUS:
+ /* next call to read_byte() will expect a status */
+ break;
+
+ case NAND_CMD_SEQIN:
+ write_page_prologue(mtd, g4_addr);
+
+ /* hack for deferred write of oob bytes */
+ if (doc->oob_page == page_addr)
+ memcpy(nand->oob_poi, doc->oob_buf, 16);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ pageprog(mtd);
+ break;
+
+ /* we don't expect these, based on review of nand_base.c */
+ case NAND_CMD_READOOB:
+ case NAND_CMD_READID:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ dev_warn(doc->dev, "docg4_command: "
+ "unexpected nand command 0x%x\n", command);
+ break;
+
+ }
+}
+
+static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int page, bool use_ecc)
+{
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t status, edc_err, *buf16;
+
+ dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
+
+ writew(DOC_ECCCONF0_READ_MODE |
+ DOC_ECCCONF0_ECC_ENABLE |
+ DOC_ECCCONF0_UNKNOWN |
+ DOCG4_BCH_SIZE,
+ docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* the 1st byte from the I/O reg is a status; the rest is page data */
+ status = readw(docptr + DOC_IOSPACE_DATA);
+ if (status & DOCG4_READ_ERROR) {
+ dev_err(doc->dev,
+ "docg4_read_page: bad status: 0x%02x\n", status);
+ writew(0, docptr + DOC_DATAEND);
+ return -EIO;
+ }
+
+ dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
+
+ docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */
+
+ /*
+ * Diskonchips read oob immediately after a page read. Mtd
+ * infrastructure issues a separate command for reading oob after the
+ * page is read. So we save the oob bytes in a local buffer and just
+ * copy it if the next command reads oob from the same page.
+ */
+
+ /* first 14 oob bytes read from I/O reg */
+ docg4_read_buf(mtd, doc->oob_buf, 14);
+
+ /* last 2 read from another reg */
+ buf16 = (uint16_t *)(doc->oob_buf + 14);
+ *buf16 = readw(docptr + DOCG4_MYSTERY_REG);
+
+ write_nop(docptr);
+
+ if (likely(use_ecc == true)) {
+
+ /* read the register that tells us if bitflip(s) detected */
+ edc_err = readw(docptr + DOC_ECCCONF1);
+ edc_err = readw(docptr + DOC_ECCCONF1);
+ dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err);
+
+ /* If bitflips are reported, attempt to correct with ecc */
+ if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
+ int bits_corrected = correct_data(mtd, buf, page);
+ if (bits_corrected == -EBADMSG)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += bits_corrected;
+ }
+ }
+
+ writew(0, docptr + DOC_DATAEND);
+ return 0;
+}
+
+
+static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int page)
+{
+ return read_page(mtd, nand, buf, page, false);
+}
+
+static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int page)
+{
+ return read_page(mtd, nand, buf, page, true);
+}
+
+static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
+ int page, int sndcmd)
+{
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t status;
+
+ dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
+
+ /*
+ * Oob bytes are read as part of a normal page read. If the previous
+ * nand command was a read of the page whose oob is now being read, just
+ * copy the oob bytes that we saved in a local buffer and avoid a
+ * separate oob read.
+ */
+ if (doc->last_command.command == NAND_CMD_READ0 &&
+ doc->last_command.page == page) {
+ memcpy(nand->oob_poi, doc->oob_buf, 16);
+ return 0;
+ }
+
+ /*
+ * Separate read of oob data only.
+ */
+ docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
+
+ writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* the 1st byte from the I/O reg is a status; the rest is oob data */
+ status = readw(docptr + DOC_IOSPACE_DATA);
+ if (status & DOCG4_READ_ERROR) {
+ dev_warn(doc->dev,
+ "docg4_read_oob failed: status = 0x%02x\n", status);
+ return -EIO;
+ }
+
+ dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
+
+ docg4_read_buf(mtd, nand->oob_poi, 16);
+
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+
+ return 0;
+}
+
+static void docg4_erase_block(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t g4_page;
+
+ dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
+
+ sequence_reset(mtd);
+
+ writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+
+ /* only 2 bytes of address are written to specify erase block */
+ g4_page = (uint16_t)(page / 4); /* to g4's 2k page addressing */
+ writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
+ g4_page >>= 8;
+ writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
+ write_nop(docptr);
+
+ /* start the erasure */
+ writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ usleep_range(500, 1000); /* erasure is long; take a snooze */
+ poll_status(doc);
+ writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ read_progstatus(doc);
+
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+ poll_status(doc);
+ write_nop(docptr);
+}
+
+static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
+ const uint8_t *buf, bool use_ecc)
+{
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint8_t ecc_buf[8];
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ writew(DOC_ECCCONF0_ECC_ENABLE |
+ DOC_ECCCONF0_UNKNOWN |
+ DOCG4_BCH_SIZE,
+ docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+
+ /* write the page data */
+ docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE);
+
+ /* oob bytes 0 through 5 are written to I/O reg */
+ docg4_write_buf16(mtd, nand->oob_poi, 6);
+
+ /* oob byte 6 written to a separate reg */
+ writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7);
+
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* write hw-generated ecc bytes to oob */
+ if (likely(use_ecc == true)) {
+ /* oob byte 7 is hamming code */
+ uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY);
+ hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */
+ writew(hamming, docptr + DOCG4_OOB_6_7);
+ write_nop(docptr);
+
+ /* read the 7 bch bytes from ecc regs */
+ read_hw_ecc(docptr, ecc_buf);
+ ecc_buf[7] = 0; /* clear the "page written" flag */
+ }
+
+ /* write user-supplied bytes to oob */
+ else {
+ writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7);
+ write_nop(docptr);
+ memcpy(ecc_buf, &nand->oob_poi[8], 8);
+ }
+
+ docg4_write_buf16(mtd, ecc_buf, 8);
+ write_nop(docptr);
+ write_nop(docptr);
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+}
+
+static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+ const uint8_t *buf)
+{
+ return write_page(mtd, nand, buf, false);
+}
+
+static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
+ const uint8_t *buf)
+{
+ return write_page(mtd, nand, buf, true);
+}
+
+static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
+ int page)
+{
+ /*
+ * Writing oob-only is not really supported, because MLC nand must write
+ * oob bytes at the same time as page data. Nonetheless, we save the
+ * oob buffer contents here, and then write it along with the page data
+ * if the same page is subsequently written. This allows user space
+ * utilities that write the oob data prior to the page data to work
+	 * (e.g., nandwrite).  The disadvantage is that, if the intention was to
+ * write oob only, the operation is quietly ignored. Also, oob can get
+ * corrupted if two concurrent processes are running nandwrite.
+ */
+
+ /* note that bytes 7..14 are hw generated hamming/ecc and overwritten */
+ struct docg4_priv *doc = nand->priv;
+ doc->oob_page = page;
+ memcpy(doc->oob_buf, nand->oob_poi, 16);
+ return 0;
+}
+
+static int __init read_factory_bbt(struct mtd_info *mtd)
+{
+ /*
+ * The device contains a read-only factory bad block table. Read it and
+ * update the memory-based bbt accordingly.
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
+ uint8_t *buf;
+ int i, block, status;
+
+ buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ read_page_prologue(mtd, g4_addr);
+ status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE);
+ if (status)
+ goto exit;
+
+ /*
+ * If no memory-based bbt was created, exit. This will happen if module
+ * parameter ignore_badblocks is set. Then why even call this function?
+ * For an unknown reason, block erase always fails if it's the first
+ * operation after device power-up. The above read ensures it never is.
+ * Ugly, I know.
+ */
+ if (nand->bbt == NULL) /* no memory-based bbt */
+ goto exit;
+
+ /*
+ * Parse factory bbt and update memory-based bbt. Factory bbt format is
+ * simple: one bit per block, block numbers increase left to right (msb
+ * to lsb). Bit clear means bad block.
+ */
+ for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) {
+ int bitnum;
+ unsigned long bits = ~buf[i];
+ for_each_set_bit(bitnum, &bits, 8) {
+ int badblock = block + 7 - bitnum;
+ nand->bbt[badblock / 4] |=
+ 0x03 << ((badblock % 4) * 2);
+ mtd->ecc_stats.badblocks++;
+ dev_notice(doc->dev, "factory-marked bad block: %d\n",
+ badblock);
+ }
+ }
+ exit:
+ kfree(buf);
+ return status;
+}
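
An illustration of the factory bbt parsing above (the byte value is hypothetical):

    /* buf[0] = 0xfe  ->  bits = ~0xfe, of which only bit 0 is set (within 8 bits)
     * badblock = 0 + 7 - 0 = 7: the lsb of the first byte marks block 7 bad,
     * while its msb would correspond to block 0.
     */
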
+
+static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ /*
+ * Mark a block as bad. Bad blocks are marked in the oob area of the
+ * first page of the block. The default scan_bbt() in the nand
+ * infrastructure code works fine for building the memory-based bbt
+ * during initialization, as does the nand infrastructure function that
+ * checks if a block is bad by reading the bbt. This function replaces
+ * the nand default because writes to oob-only are not supported.
+ */
+
+ int ret, i;
+ uint8_t *buf;
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ struct nand_bbt_descr *bbtd = nand->badblock_pattern;
+ int block = (int)(ofs >> nand->bbt_erase_shift);
+ int page = (int)(ofs >> nand->page_shift);
+ uint32_t g4_addr = mtd_to_docg4_address(page, 0);
+
+ dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs);
+
+ if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1)))
+ dev_warn(doc->dev, "%s: ofs %llx not start of block!\n",
+ __func__, ofs);
+
+ /* allocate blank buffer for page data */
+ buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ /* update bbt in memory */
+ nand->bbt[block / 4] |= 0x01 << ((block & 0x03) * 2);
+
+ /* write bit-wise negation of pattern to oob buffer */
+ memset(nand->oob_poi, 0xff, mtd->oobsize);
+ for (i = 0; i < bbtd->len; i++)
+ nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i];
+
+ /* write first page of block */
+ write_page_prologue(mtd, g4_addr);
+ docg4_write_page(mtd, nand, buf);
+ ret = pageprog(mtd);
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ /* only called when module_param ignore_badblocks is set */
+ return 0;
+}
+
+static int docg4_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ /*
+ * Put the device into "deep power-down" mode. Note that CE# must be
+ * deasserted for this to take effect. The xscale, e.g., can be
+ * configured to float this signal when the processor enters power-down,
+ * and a suitable pull-up ensures its deassertion.
+ */
+
+ int i;
+ uint8_t pwr_down;
+ struct docg4_priv *doc = platform_get_drvdata(pdev);
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ /* poll the register that tells us we're ready to go to sleep */
+ for (i = 0; i < 10; i++) {
+ pwr_down = readb(docptr + DOC_POWERMODE);
+ if (pwr_down & DOC_POWERDOWN_READY)
+ break;
+ usleep_range(1000, 4000);
+ }
+
+	if (!(pwr_down & DOC_POWERDOWN_READY)) {
+ dev_err(doc->dev, "suspend failed; "
+ "timeout polling DOC_POWERDOWN_READY\n");
+ return -EIO;
+ }
+
+ writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN,
+ docptr + DOC_ASICMODE);
+ writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN),
+ docptr + DOC_ASICMODECONFIRM);
+
+ write_nop(docptr);
+
+ return 0;
+}
+
+static int docg4_resume(struct platform_device *pdev)
+{
+
+ /*
+ * Exit power-down. Twelve consecutive reads of the address below
+	 * accomplish this, assuming CE# has been asserted.
+ */
+
+ struct docg4_priv *doc = platform_get_drvdata(pdev);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ for (i = 0; i < 12; i++)
+ readb(docptr + 0x1fff);
+
+ return 0;
+}
+
+static void __init init_mtd_structs(struct mtd_info *mtd)
+{
+ /* initialize mtd and nand data structures */
+
+ /*
+ * Note that some of the following initializations are not usually
+ * required within a nand driver because they are performed by the nand
+ * infrastructure code as part of nand_scan(). In this case they need
+	 * to be initialized here because we skip the call to nand_scan_ident() (the
+ * first half of nand_scan()). The call to nand_scan_ident() is skipped
+ * because for this device the chip id is not read in the manner of a
+ * standard nand device. Unfortunately, nand_scan_ident() does other
+ * things as well, such as call nand_set_defaults().
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+
+ mtd->size = DOCG4_CHIP_SIZE;
+ mtd->name = "Msys_Diskonchip_G4";
+ mtd->writesize = DOCG4_PAGE_SIZE;
+ mtd->erasesize = DOCG4_BLOCK_SIZE;
+ mtd->oobsize = DOCG4_OOB_SIZE;
+ nand->chipsize = DOCG4_CHIP_SIZE;
+ nand->chip_shift = DOCG4_CHIP_SHIFT;
+ nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
+ nand->chip_delay = 20;
+ nand->page_shift = DOCG4_PAGE_SHIFT;
+ nand->pagemask = 0x3ffff;
+ nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
+ nand->badblockbits = 8;
+ nand->ecc.layout = &docg4_oobinfo;
+ nand->ecc.mode = NAND_ECC_HW_SYNDROME;
+ nand->ecc.size = DOCG4_PAGE_SIZE;
+ nand->ecc.prepad = 8;
+ nand->ecc.bytes = 8;
+ nand->ecc.strength = DOCG4_T;
+ nand->options =
+ NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
+ nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
+ nand->controller = &nand->hwcontrol;
+ spin_lock_init(&nand->controller->lock);
+ init_waitqueue_head(&nand->controller->wq);
+
+ /* methods */
+ nand->cmdfunc = docg4_command;
+ nand->waitfunc = docg4_wait;
+ nand->select_chip = docg4_select_chip;
+ nand->read_byte = docg4_read_byte;
+ nand->block_markbad = docg4_block_markbad;
+ nand->read_buf = docg4_read_buf;
+ nand->write_buf = docg4_write_buf16;
+ nand->scan_bbt = nand_default_bbt;
+ nand->erase_cmd = docg4_erase_block;
+ nand->ecc.read_page = docg4_read_page;
+ nand->ecc.write_page = docg4_write_page;
+ nand->ecc.read_page_raw = docg4_read_page_raw;
+ nand->ecc.write_page_raw = docg4_write_page_raw;
+ nand->ecc.read_oob = docg4_read_oob;
+ nand->ecc.write_oob = docg4_write_oob;
+
+ /*
+ * The way the nand infrastructure code is written, a memory-based bbt
+ * is not created if NAND_SKIP_BBTSCAN is set. With no memory bbt,
+ * nand->block_bad() is used. So when ignoring bad blocks, we skip the
+ * scan and define a dummy block_bad() which always returns 0.
+ */
+ if (ignore_badblocks) {
+ nand->options |= NAND_SKIP_BBTSCAN;
+ nand->block_bad = docg4_block_neverbad;
+ }
+
+}
+
+static int __init read_id_reg(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t id1, id2;
+
+ /* check for presence of g4 chip by reading id registers */
+ id1 = readw(docptr + DOC_CHIPID);
+ id1 = readw(docptr + DOCG4_MYSTERY_REG);
+ id2 = readw(docptr + DOC_CHIPID_INV);
+ id2 = readw(docptr + DOCG4_MYSTERY_REG);
+
+ if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) {
+ dev_info(doc->dev,
+ "NAND device: 128MiB Diskonchip G4 detected\n");
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+
+static int __init probe_docg4(struct platform_device *pdev)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ void __iomem *virtadr;
+ struct docg4_priv *doc;
+ int len, retval;
+ struct resource *r;
+ struct device *dev = &pdev->dev;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(dev, "no io memory resource defined!\n");
+ return -ENODEV;
+ }
+
+ virtadr = ioremap(r->start, resource_size(r));
+ if (!virtadr) {
+ dev_err(dev, "Diskonchip ioremap failed: %pR\n", r);
+ return -EIO;
+ }
+
+ len = sizeof(struct mtd_info) + sizeof(struct nand_chip) +
+ sizeof(struct docg4_priv);
+ mtd = kzalloc(len, GFP_KERNEL);
+ if (mtd == NULL) {
+ retval = -ENOMEM;
+ goto fail;
+ }
+ nand = (struct nand_chip *) (mtd + 1);
+ doc = (struct docg4_priv *) (nand + 1);
+ mtd->priv = nand;
+ nand->priv = doc;
+ mtd->owner = THIS_MODULE;
+ doc->virtadr = virtadr;
+ doc->dev = dev;
+
+ init_mtd_structs(mtd);
+
+ /* initialize kernel bch algorithm */
+ doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
+ if (doc->bch == NULL) {
+ retval = -EINVAL;
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, doc);
+
+ reset(mtd);
+ retval = read_id_reg(mtd);
+ if (retval == -ENODEV) {
+ dev_warn(dev, "No diskonchip G4 device found.\n");
+ goto fail;
+ }
+
+ retval = nand_scan_tail(mtd);
+ if (retval)
+ goto fail;
+
+ retval = read_factory_bbt(mtd);
+ if (retval)
+ goto fail;
+
+ retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+ if (retval)
+ goto fail;
+
+ doc->mtd = mtd;
+ return 0;
+
+ fail:
+ iounmap(virtadr);
+ if (mtd) {
+ /* re-declarations avoid compiler warning */
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ nand_release(mtd); /* deletes partitions and mtd devices */
+ platform_set_drvdata(pdev, NULL);
+ free_bch(doc->bch);
+ kfree(mtd);
+ }
+
+ return retval;
+}
+
+static int __exit cleanup_docg4(struct platform_device *pdev)
+{
+ struct docg4_priv *doc = platform_get_drvdata(pdev);
+ nand_release(doc->mtd);
+ platform_set_drvdata(pdev, NULL);
+ free_bch(doc->bch);
+ kfree(doc->mtd);
+ iounmap(doc->virtadr);
+ return 0;
+}
+
+static struct platform_driver docg4_driver = {
+ .driver = {
+ .name = "docg4",
+ .owner = THIS_MODULE,
+ },
+ .suspend = docg4_suspend,
+ .resume = docg4_resume,
+ .remove = __exit_p(cleanup_docg4),
+};
+
+static int __init docg4_init(void)
+{
+ return platform_driver_probe(&docg4_driver, probe_docg4);
+}
+
+static void __exit docg4_exit(void)
+{
+ platform_driver_unregister(&docg4_driver);
+}
+
+module_init(docg4_init);
+module_exit(docg4_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Dunn");
+MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 7195ee6efe12..80b5264f0a32 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -813,6 +813,12 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
&fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
chip->ecc.size = 512;
chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ /*
+ * FIXME: can hardware ecc correct 4 bitflips if page size is
+ * 2k? Then does hardware report number of corrections for this
+ * case? If so, ecc_stats reporting needs to be fixed as well.
+ */
} else {
/* otherwise fall back to default software ECC */
chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index e53b76064133..1b8330e1155a 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -17,6 +17,10 @@
*/
#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -27,6 +31,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -34,7 +39,7 @@
#include <linux/amba/bus.h>
#include <mtd/mtd-abi.h>
-static struct nand_ecclayout fsmc_ecc1_layout = {
+static struct nand_ecclayout fsmc_ecc1_128_layout = {
.eccbytes = 24,
.eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
@@ -50,7 +55,127 @@ static struct nand_ecclayout fsmc_ecc1_layout = {
}
};
-static struct nand_ecclayout fsmc_ecc4_lp_layout = {
+static struct nand_ecclayout fsmc_ecc1_64_layout = {
+ .eccbytes = 12,
+ .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ {.offset = 24, .length = 8},
+ {.offset = 40, .length = 8},
+ {.offset = 56, .length = 8},
+ }
+};
+
+static struct nand_ecclayout fsmc_ecc1_16_layout = {
+ .eccbytes = 3,
+ .eccpos = {2, 3, 4},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16 bytes
+ * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 46
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_256_layout = {
+ .eccbytes = 208,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126,
+ 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142,
+ 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158,
+ 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174,
+ 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190,
+ 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206,
+ 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222,
+ 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238,
+ 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 3},
+ {.offset = 143, .length = 3},
+ {.offset = 159, .length = 3},
+ {.offset = 175, .length = 3},
+ {.offset = 191, .length = 3},
+ {.offset = 207, .length = 3},
+ {.offset = 223, .length = 3},
+ {.offset = 239, .length = 3},
+ {.offset = 255, .length = 1}
+ }
+};
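
The "13*16" in the comment above reflects the controller producing 13 ecc bytes per 512-byte sector; checking the numbers for this layout:

    /* 8192 / 512 = 16 sectors  ->  16 * 13 = 208 ecc bytes (.eccbytes above)
     * 256 oob - 208 ecc - 2 bad-block marker bytes = 46 bytes free for use
     */
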
+
+/*
+ * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8 bytes
+ * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_224_layout = {
+ .eccbytes = 104,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 97}
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8 bytes
+ * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 22
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_128_layout = {
.eccbytes = 104,
.eccpos = { 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14,
@@ -82,6 +207,45 @@ static struct nand_ecclayout fsmc_ecc4_lp_layout = {
};
/*
+ * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4 bytes of
+ * OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 10
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_64_layout = {
+ .eccbytes = 52,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 1},
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes of
+ * OOB size is reserved for ECC, Byte no. 4 & 5 reserved for bad block and One
+ * byte is free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_16_layout = {
+ .eccbytes = 13,
+ .eccpos = { 0, 1, 2, 3, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14
+ },
+ .oobfree = {
+ {.offset = 15, .length = 1},
+ }
+};
+
+/*
* ECC placement definitions in oobfree type format.
* There are 13 bytes of ecc for every 512 byte block and it has to be read
* consecutively and immediately after the 512 byte data block for hardware to
@@ -103,16 +267,6 @@ static struct fsmc_eccplace fsmc_ecc4_lp_place = {
}
};
-static struct nand_ecclayout fsmc_ecc4_sp_layout = {
- .eccbytes = 13,
- .eccpos = { 0, 1, 2, 3, 6, 7, 8,
- 9, 10, 11, 12, 13, 14
- },
- .oobfree = {
- {.offset = 15, .length = 1},
- }
-};
-
static struct fsmc_eccplace fsmc_ecc4_sp_place = {
.eccplace = {
{.offset = 0, .length = 4},
@@ -120,75 +274,24 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
}
};
-/*
- * Default partition tables to be used if the partition information not
- * provided through platform data.
- *
- * Default partition layout for small page(= 512 bytes) devices
- * Size for "Root file system" is updated in driver based on actual device size
- */
-static struct mtd_partition partition_info_16KB_blk[] = {
- {
- .name = "X-loader",
- .offset = 0,
- .size = 4*0x4000,
- },
- {
- .name = "U-Boot",
- .offset = 0x10000,
- .size = 20*0x4000,
- },
- {
- .name = "Kernel",
- .offset = 0x60000,
- .size = 256*0x4000,
- },
- {
- .name = "Root File System",
- .offset = 0x460000,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-/*
- * Default partition layout for large page(> 512 bytes) devices
- * Size for "Root file system" is updated in driver based on actual device size
- */
-static struct mtd_partition partition_info_128KB_blk[] = {
- {
- .name = "X-loader",
- .offset = 0,
- .size = 4*0x20000,
- },
- {
- .name = "U-Boot",
- .offset = 0x80000,
- .size = 12*0x20000,
- },
- {
- .name = "Kernel",
- .offset = 0x200000,
- .size = 48*0x20000,
- },
- {
- .name = "Root File System",
- .offset = 0x800000,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-
/**
* struct fsmc_nand_data - structure for FSMC NAND device state
*
* @pid: Part ID on the AMBA PrimeCell format
* @mtd: MTD info for a NAND flash.
* @nand: Chip related info for a NAND flash.
+ * @partitions: Partition info for a NAND Flash.
+ * @nr_partitions: Total number of partitions of a NAND flash.
*
* @ecc_place: ECC placing locations in oobfree type format.
* @bank: Bank number for probed device.
* @clk: Clock structure for FSMC.
*
+ * @read_dma_chan: DMA channel for read access
+ * @write_dma_chan: DMA channel for write access to NAND
+ * @dma_access_complete: Completion structure
+ *
+ * @data_pa: NAND Physical port for Data.
* @data_va: NAND port for Data.
* @cmd_va: NAND port for Command.
* @addr_va: NAND port for Address.
@@ -198,16 +301,23 @@ struct fsmc_nand_data {
u32 pid;
struct mtd_info mtd;
struct nand_chip nand;
+ struct mtd_partition *partitions;
+ unsigned int nr_partitions;
struct fsmc_eccplace *ecc_place;
unsigned int bank;
+ struct device *dev;
+ enum access_mode mode;
struct clk *clk;
- struct resource *resregs;
- struct resource *rescmd;
- struct resource *resaddr;
- struct resource *resdata;
+ /* DMA related objects */
+ struct dma_chan *read_dma_chan;
+ struct dma_chan *write_dma_chan;
+ struct completion dma_access_complete;
+
+ struct fsmc_nand_timings *dev_timings;
+ dma_addr_t data_pa;
void __iomem *data_va;
void __iomem *cmd_va;
void __iomem *addr_va;
@@ -251,28 +361,29 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
struct nand_chip *this = mtd->priv;
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- struct fsmc_regs *regs = host->regs_va;
+	void __iomem *regs = host->regs_va;
unsigned int bank = host->bank;
if (ctrl & NAND_CTRL_CHANGE) {
+ u32 pc;
+
if (ctrl & NAND_CLE) {
- this->IO_ADDR_R = (void __iomem *)host->cmd_va;
- this->IO_ADDR_W = (void __iomem *)host->cmd_va;
+ this->IO_ADDR_R = host->cmd_va;
+ this->IO_ADDR_W = host->cmd_va;
} else if (ctrl & NAND_ALE) {
- this->IO_ADDR_R = (void __iomem *)host->addr_va;
- this->IO_ADDR_W = (void __iomem *)host->addr_va;
+ this->IO_ADDR_R = host->addr_va;
+ this->IO_ADDR_W = host->addr_va;
} else {
- this->IO_ADDR_R = (void __iomem *)host->data_va;
- this->IO_ADDR_W = (void __iomem *)host->data_va;
+ this->IO_ADDR_R = host->data_va;
+ this->IO_ADDR_W = host->data_va;
}
- if (ctrl & NAND_NCE) {
- writel(readl(&regs->bank_regs[bank].pc) | FSMC_ENABLE,
- &regs->bank_regs[bank].pc);
- } else {
- writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ENABLE,
- &regs->bank_regs[bank].pc);
- }
+ pc = readl(FSMC_NAND_REG(regs, bank, PC));
+ if (ctrl & NAND_NCE)
+ pc |= FSMC_ENABLE;
+ else
+ pc &= ~FSMC_ENABLE;
+ writel(pc, FSMC_NAND_REG(regs, bank, PC));
}
mb();
@@ -287,22 +398,42 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
* This routine initializes timing parameters related to NAND memory access in
* FSMC registers
*/
-static void __init fsmc_nand_setup(struct fsmc_regs *regs, uint32_t bank,
- uint32_t busw)
+static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
+ uint32_t busw, struct fsmc_nand_timings *timings)
{
uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
+ uint32_t tclr, tar, thiz, thold, twait, tset;
+ struct fsmc_nand_timings *tims;
+ struct fsmc_nand_timings default_timings = {
+ .tclr = FSMC_TCLR_1,
+ .tar = FSMC_TAR_1,
+ .thiz = FSMC_THIZ_1,
+ .thold = FSMC_THOLD_4,
+ .twait = FSMC_TWAIT_6,
+ .tset = FSMC_TSET_0,
+ };
+
+ if (timings)
+ tims = timings;
+ else
+ tims = &default_timings;
+
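+	/*
+	 * Mask and shift each timing field into its register position; the
+	 * combined values are programmed into the PC, COMM and ATTRIB
+	 * registers below.
+	 */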
+ tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
+ tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
+ thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
+ thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
+ twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
+ tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
if (busw)
- writel(value | FSMC_DEVWID_16, &regs->bank_regs[bank].pc);
+ writel(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC));
else
- writel(value | FSMC_DEVWID_8, &regs->bank_regs[bank].pc);
-
- writel(readl(&regs->bank_regs[bank].pc) | FSMC_TCLR_1 | FSMC_TAR_1,
- &regs->bank_regs[bank].pc);
- writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
- &regs->bank_regs[bank].comm);
- writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
- &regs->bank_regs[bank].attrib);
+ writel(value | FSMC_DEVWID_8, FSMC_NAND_REG(regs, bank, PC));
+
+ writel(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
+ FSMC_NAND_REG(regs, bank, PC));
+ writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, COMM));
+ writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, ATTRIB));
}
/*
@@ -312,15 +443,15 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
{
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- struct fsmc_regs *regs = host->regs_va;
+ void __iomem *regs = host->regs_va;
uint32_t bank = host->bank;
- writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCPLEN_256,
- &regs->bank_regs[bank].pc);
- writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCEN,
- &regs->bank_regs[bank].pc);
- writel(readl(&regs->bank_regs[bank].pc) | FSMC_ECCEN,
- &regs->bank_regs[bank].pc);
+ writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
+ FSMC_NAND_REG(regs, bank, PC));
+ writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
+ FSMC_NAND_REG(regs, bank, PC));
+ writel(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
+ FSMC_NAND_REG(regs, bank, PC));
}
/*
@@ -333,37 +464,42 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
{
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- struct fsmc_regs *regs = host->regs_va;
+ void __iomem *regs = host->regs_va;
uint32_t bank = host->bank;
uint32_t ecc_tmp;
unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
do {
- if (readl(&regs->bank_regs[bank].sts) & FSMC_CODE_RDY)
+ if (readl(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
break;
else
cond_resched();
} while (!time_after_eq(jiffies, deadline));
- ecc_tmp = readl(&regs->bank_regs[bank].ecc1);
+ if (time_after_eq(jiffies, deadline)) {
+ dev_err(host->dev, "calculate ecc timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
ecc[0] = (uint8_t) (ecc_tmp >> 0);
ecc[1] = (uint8_t) (ecc_tmp >> 8);
ecc[2] = (uint8_t) (ecc_tmp >> 16);
ecc[3] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(&regs->bank_regs[bank].ecc2);
+ ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC2));
ecc[4] = (uint8_t) (ecc_tmp >> 0);
ecc[5] = (uint8_t) (ecc_tmp >> 8);
ecc[6] = (uint8_t) (ecc_tmp >> 16);
ecc[7] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(&regs->bank_regs[bank].ecc3);
+ ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC3));
ecc[8] = (uint8_t) (ecc_tmp >> 0);
ecc[9] = (uint8_t) (ecc_tmp >> 8);
ecc[10] = (uint8_t) (ecc_tmp >> 16);
ecc[11] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(&regs->bank_regs[bank].sts);
+ ecc_tmp = readl(FSMC_NAND_REG(regs, bank, STS));
ecc[12] = (uint8_t) (ecc_tmp >> 16);
return 0;
@@ -379,11 +515,11 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
{
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- struct fsmc_regs *regs = host->regs_va;
+ void __iomem *regs = host->regs_va;
uint32_t bank = host->bank;
uint32_t ecc_tmp;
- ecc_tmp = readl(&regs->bank_regs[bank].ecc1);
+ ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
ecc[0] = (uint8_t) (ecc_tmp >> 0);
ecc[1] = (uint8_t) (ecc_tmp >> 8);
ecc[2] = (uint8_t) (ecc_tmp >> 16);
@@ -391,6 +527,166 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
return 0;
}
+/* Count the number of 0's in buff up to a max of max_bits */
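+/*
+ * Note: hweight8(~buff[k]) counts the zero bits in a byte, so an erased byte
+ * (0xFF) contributes nothing while e.g. 0xFE counts as one written bit; the
+ * scan stops early once more than max_bits written bits have been seen.
+ */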
+static int count_written_bits(uint8_t *buff, int size, int max_bits)
+{
+ int k, written_bits = 0;
+
+ for (k = 0; k < size; k++) {
+ written_bits += hweight8(~buff[k]);
+ if (written_bits > max_bits)
+ break;
+ }
+
+ return written_bits;
+}
+
+static void dma_complete(void *param)
+{
+ struct fsmc_nand_data *host = param;
+
+ complete(&host->dma_access_complete);
+}
+
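+/*
+ * dma_xfer - move @len bytes between @buffer and the NAND data port
+ *
+ * Maps the buffer, prepares a DMA memcpy descriptor to/from host->data_pa,
+ * submits it and waits (up to 3 seconds) for dma_complete() to signal that
+ * the transfer finished.
+ */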
+static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
+ enum dma_data_direction direction)
+{
+ struct dma_chan *chan;
+ struct dma_device *dma_dev;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t dma_dst, dma_src, dma_addr;
+ dma_cookie_t cookie;
+ unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int ret;
+
+ if (direction == DMA_TO_DEVICE)
+ chan = host->write_dma_chan;
+ else if (direction == DMA_FROM_DEVICE)
+ chan = host->read_dma_chan;
+ else
+ return -EINVAL;
+
+ dma_dev = chan->device;
+ dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+
+ if (direction == DMA_TO_DEVICE) {
+ dma_src = dma_addr;
+ dma_dst = host->data_pa;
+ flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
+ } else {
+ dma_src = host->data_pa;
+ dma_dst = dma_addr;
+ flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
+ }
+
+ tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
+ len, flags);
+
+ if (!tx) {
+ dev_err(host->dev, "device_prep_dma_memcpy error\n");
+ dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+ return -EIO;
+ }
+
+ tx->callback = dma_complete;
+ tx->callback_param = host;
+ cookie = tx->tx_submit(tx);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(host->dev, "dma_submit_error %d\n", cookie);
+ return ret;
+ }
+
+ dma_async_issue_pending(chan);
+
+ ret =
+ wait_for_completion_interruptible_timeout(&host->dma_access_complete,
+ msecs_to_jiffies(3000));
+ if (ret <= 0) {
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dev_err(host->dev, "wait_for_completion_timeout\n");
+ return ret ? ret : -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * fsmc_write_buf - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) {
+ uint32_t *p = (uint32_t *)buf;
+ len = len >> 2;
+ for (i = 0; i < len; i++)
+ writel(p[i], chip->IO_ADDR_W);
+ } else {
+ for (i = 0; i < len; i++)
+ writeb(buf[i], chip->IO_ADDR_W);
+ }
+}
+
+/*
+ * fsmc_read_buf - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) {
+ uint32_t *p = (uint32_t *)buf;
+ len = len >> 2;
+ for (i = 0; i < len; i++)
+ p[i] = readl(chip->IO_ADDR_R);
+ } else {
+ for (i = 0; i < len; i++)
+ buf[i] = readb(chip->IO_ADDR_R);
+ }
+}
+
+/*
+ * fsmc_read_buf_dma - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct fsmc_nand_data *host;
+
+ host = container_of(mtd, struct fsmc_nand_data, mtd);
+ dma_xfer(host, buf, len, DMA_FROM_DEVICE);
+}
+
+/*
+ * fsmc_write_buf_dma - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct fsmc_nand_data *host;
+
+ host = container_of(mtd, struct fsmc_nand_data, mtd);
+ dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
+}
+
/*
* fsmc_read_page_hwecc
* @mtd: mtd info structure
@@ -426,7 +722,6 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *oob = (uint8_t *)&ecc_oob[0];
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
-
chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
chip->ecc.hwctl(mtd, NAND_ECC_READ);
chip->read_buf(mtd, p, eccsize);
@@ -437,17 +732,19 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
group++;
/*
- * length is intentionally kept a higher multiple of 2
- * to read at least 13 bytes even in case of 16 bit NAND
- * devices
- */
- len = roundup(len, 2);
+ * length is intentionally kept a higher multiple of 2
+ * to read at least 13 bytes even in case of 16 bit NAND
+ * devices
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ len = roundup(len, 2);
+
chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
chip->read_buf(mtd, oob + j, len);
j += len;
}
- memcpy(&ecc_code[i], oob, 13);
+ memcpy(&ecc_code[i], oob, chip->ecc.bytes);
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
@@ -461,7 +758,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
}
/*
- * fsmc_correct_data
+ * fsmc_bch8_correct_data
* @mtd: mtd info structure
* @dat: buffer of read data
* @read_ecc: ecc read from device spare area
@@ -470,19 +767,51 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
 * calc_ecc is a 104-bit value containing a maximum of 8 error
 * offsets of 13 bits each within 512 bytes of read data.
*/
-static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
+static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
uint8_t *read_ecc, uint8_t *calc_ecc)
{
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- struct fsmc_regs *regs = host->regs_va;
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *regs = host->regs_va;
unsigned int bank = host->bank;
- uint16_t err_idx[8];
- uint64_t ecc_data[2];
+ uint32_t err_idx[8];
uint32_t num_err, i;
+ uint32_t ecc1, ecc2, ecc3, ecc4;
+
+ num_err = (readl(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
+
+ /* no bit flipping */
+ if (likely(num_err == 0))
+ return 0;
+
+ /* too many errors */
+ if (unlikely(num_err > 8)) {
+ /*
+ * This is a temporary erase check. A newly erased page read
+ * would result in an ecc error because the oob data is also
+		 * erased to FF and the calculated ecc for all-FF data is not
+		 * FF..FF.
+		 * This is a workaround to skip performing correction in case
+		 * data is FF..FF.
+		 *
+		 * Logic:
+		 * For every page, each bit written as 0 is counted until the
+		 * number of such bits exceeds 8 (the maximum correction
+		 * capability of the FSMC for each 512 + 13 bytes).
+ */
+
+ int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
+ int bits_data = count_written_bits(dat, chip->ecc.size, 8);
+
+ if ((bits_ecc + bits_data) <= 8) {
+ if (bits_data)
+ memset(dat, 0xff, chip->ecc.size);
+ return bits_data;
+ }
- /* The calculated ecc is actually the correction index in data */
- memcpy(ecc_data, calc_ecc, 13);
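+		/*
+		 * More than 8 written bits were found in data + ECC, so this
+		 * is not an erased page and the errors exceed the correction
+		 * capability of the controller.
+		 */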
+ return -EBADMSG;
+ }
/*
* ------------------- calc_ecc[] bit wise -----------|--13 bits--|
@@ -493,27 +822,26 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
* uint64_t array and error offset indexes are populated in err_idx
* array
*/
- for (i = 0; i < 8; i++) {
- if (i == 4) {
- err_idx[4] = ((ecc_data[1] & 0x1) << 12) | ecc_data[0];
- ecc_data[1] >>= 1;
- continue;
- }
- err_idx[i] = (ecc_data[i/4] & 0x1FFF);
- ecc_data[i/4] >>= 13;
- }
-
- num_err = (readl(&regs->bank_regs[bank].sts) >> 10) & 0xF;
-
- if (num_err == 0xF)
- return -EBADMSG;
+ ecc1 = readl(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc2 = readl(FSMC_NAND_REG(regs, bank, ECC2));
+ ecc3 = readl(FSMC_NAND_REG(regs, bank, ECC3));
+ ecc4 = readl(FSMC_NAND_REG(regs, bank, STS));
+
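+	/*
+	 * The eight 13-bit error indexes are packed back to back across the
+	 * ECC1, ECC2 and ECC3 registers; the top 8 bits of err_idx[7] come
+	 * from bits [23:16] of the STS register (read into ecc4 above).
+	 */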
+ err_idx[0] = (ecc1 >> 0) & 0x1FFF;
+ err_idx[1] = (ecc1 >> 13) & 0x1FFF;
+ err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
+ err_idx[3] = (ecc2 >> 7) & 0x1FFF;
+ err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
+ err_idx[5] = (ecc3 >> 1) & 0x1FFF;
+ err_idx[6] = (ecc3 >> 14) & 0x1FFF;
+ err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
i = 0;
while (num_err--) {
change_bit(0, (unsigned long *)&err_idx[i]);
change_bit(1, (unsigned long *)&err_idx[i]);
- if (err_idx[i] <= 512 * 8) {
+ if (err_idx[i] < chip->ecc.size * 8) {
change_bit(err_idx[i], (unsigned long *)dat);
i++;
}
@@ -521,6 +849,44 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
return i;
}
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ chan->private = slave;
+ return true;
+}
+
+#ifdef CONFIG_OF
+static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ u32 val;
+
+ /* Set default NAND width to 8 bits */
+ pdata->width = 8;
+ if (!of_property_read_u32(np, "bank-width", &val)) {
+ if (val == 2) {
+ pdata->width = 16;
+ } else if (val != 1) {
+ dev_err(&pdev->dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+ of_property_read_u32(np, "st,ale-off", &pdata->ale_off);
+ of_property_read_u32(np, "st,cle-off", &pdata->cle_off);
+ if (of_get_property(np, "nand-skip-bbtscan", NULL))
+ pdata->options = NAND_SKIP_BBTSCAN;
+
+ return 0;
+}
+#else
+static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ return -ENOSYS;
+}
+#endif
+
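+/*
+ * For reference, an illustrative (not authoritative) device tree node using
+ * the properties parsed above might look like the following; the offsets are
+ * example values only and the reg/reg-names resources are omitted:
+ *
+ *	fsmc-nand {
+ *		compatible = "st,spear600-fsmc-nand";
+ *		bank-width = <1>;
+ *		st,ale-off = <0x20000>;
+ *		st,cle-off = <0x10000>;
+ *		nand-skip-bbtscan;
+ *	};
+ */
+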
/*
* fsmc_nand_probe - Probe function
* @pdev: platform device structure
@@ -528,102 +894,109 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
static int __init fsmc_nand_probe(struct platform_device *pdev)
{
struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node __maybe_unused *np = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata = {};
struct fsmc_nand_data *host;
struct mtd_info *mtd;
struct nand_chip *nand;
- struct fsmc_regs *regs;
struct resource *res;
+ dma_cap_mask_t mask;
int ret = 0;
u32 pid;
int i;
+ if (np) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ pdev->dev.platform_data = pdata;
+ ret = fsmc_nand_probe_config_dt(pdev, np);
+ if (ret) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -ENODEV;
+ }
+ }
+
if (!pdata) {
dev_err(&pdev->dev, "platform data is NULL\n");
return -EINVAL;
}
/* Allocate memory for the device structure (and zero it) */
- host = kzalloc(sizeof(*host), GFP_KERNEL);
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
if (!host) {
dev_err(&pdev->dev, "failed to allocate device structure\n");
return -ENOMEM;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
- if (!res) {
- ret = -EIO;
- goto err_probe1;
- }
+ if (!res)
+ return -EINVAL;
- host->resdata = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!host->resdata) {
- ret = -EIO;
- goto err_probe1;
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name)) {
+		dev_err(&pdev->dev, "Failed to get memory data resource\n");
+ return -ENOENT;
}
- host->data_va = ioremap(res->start, resource_size(res));
+ host->data_pa = (dma_addr_t)res->start;
+ host->data_va = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (!host->data_va) {
- ret = -EIO;
- goto err_probe1;
+ dev_err(&pdev->dev, "data ioremap failed\n");
+ return -ENOMEM;
}
- host->resaddr = request_mem_region(res->start + PLAT_NAND_ALE,
- resource_size(res), pdev->name);
- if (!host->resaddr) {
- ret = -EIO;
- goto err_probe1;
+ if (!devm_request_mem_region(&pdev->dev, res->start + pdata->ale_off,
+ resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "Failed to get memory ale resource\n");
+ return -ENOENT;
}
- host->addr_va = ioremap(res->start + PLAT_NAND_ALE, resource_size(res));
+ host->addr_va = devm_ioremap(&pdev->dev, res->start + pdata->ale_off,
+ resource_size(res));
if (!host->addr_va) {
- ret = -EIO;
- goto err_probe1;
+ dev_err(&pdev->dev, "ale ioremap failed\n");
+ return -ENOMEM;
}
- host->rescmd = request_mem_region(res->start + PLAT_NAND_CLE,
- resource_size(res), pdev->name);
- if (!host->rescmd) {
- ret = -EIO;
- goto err_probe1;
+ if (!devm_request_mem_region(&pdev->dev, res->start + pdata->cle_off,
+ resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "Failed to get memory cle resource\n");
+ return -ENOENT;
}
- host->cmd_va = ioremap(res->start + PLAT_NAND_CLE, resource_size(res));
+ host->cmd_va = devm_ioremap(&pdev->dev, res->start + pdata->cle_off,
+ resource_size(res));
if (!host->cmd_va) {
- ret = -EIO;
- goto err_probe1;
+		dev_err(&pdev->dev, "cle ioremap failed\n");
+ return -ENOMEM;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
- if (!res) {
- ret = -EIO;
- goto err_probe1;
- }
+ if (!res)
+ return -EINVAL;
- host->resregs = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!host->resregs) {
- ret = -EIO;
- goto err_probe1;
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name)) {
+		dev_err(&pdev->dev, "Failed to get memory regs resource\n");
+ return -ENOENT;
}
- host->regs_va = ioremap(res->start, resource_size(res));
+ host->regs_va = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (!host->regs_va) {
- ret = -EIO;
- goto err_probe1;
+ dev_err(&pdev->dev, "regs ioremap failed\n");
+ return -ENOMEM;
}
host->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "failed to fetch block clock\n");
- ret = PTR_ERR(host->clk);
- host->clk = NULL;
- goto err_probe1;
+ return PTR_ERR(host->clk);
}
ret = clk_enable(host->clk);
if (ret)
- goto err_probe1;
+ goto err_clk_enable;
/*
* This device ID is actually a common AMBA ID as used on the
@@ -639,7 +1012,14 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
host->bank = pdata->bank;
host->select_chip = pdata->select_bank;
- regs = host->regs_va;
+ host->partitions = pdata->partitions;
+ host->nr_partitions = pdata->nr_partitions;
+ host->dev = &pdev->dev;
+ host->dev_timings = pdata->nand_timings;
+ host->mode = pdata->mode;
+
+ if (host->mode == USE_DMA_ACCESS)
+ init_completion(&host->dma_access_complete);
/* Link all private pointers */
mtd = &host->mtd;
@@ -658,21 +1038,53 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
nand->ecc.size = 512;
nand->options = pdata->options;
nand->select_chip = fsmc_select_chip;
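+	/*
+	 * With badblockbits set to 7, a block is treated as good only if at
+	 * least 7 of the 8 bits in its bad block marker byte still read as 1
+	 * (see nand_block_bad()).
+	 */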
+ nand->badblockbits = 7;
if (pdata->width == FSMC_NAND_BW16)
nand->options |= NAND_BUSWIDTH_16;
- fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16);
+ switch (host->mode) {
+ case USE_DMA_ACCESS:
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ host->read_dma_chan = dma_request_channel(mask, filter,
+ pdata->read_dma_priv);
+ if (!host->read_dma_chan) {
+ dev_err(&pdev->dev, "Unable to get read dma channel\n");
+ goto err_req_read_chnl;
+ }
+ host->write_dma_chan = dma_request_channel(mask, filter,
+ pdata->write_dma_priv);
+ if (!host->write_dma_chan) {
+ dev_err(&pdev->dev, "Unable to get write dma channel\n");
+ goto err_req_write_chnl;
+ }
+ nand->read_buf = fsmc_read_buf_dma;
+ nand->write_buf = fsmc_write_buf_dma;
+ break;
+
+ default:
+ case USE_WORD_ACCESS:
+ nand->read_buf = fsmc_read_buf;
+ nand->write_buf = fsmc_write_buf;
+ break;
+ }
+
+ fsmc_nand_setup(host->regs_va, host->bank,
+ nand->options & NAND_BUSWIDTH_16,
+ host->dev_timings);
if (AMBA_REV_BITS(host->pid) >= 8) {
nand->ecc.read_page = fsmc_read_page_hwecc;
nand->ecc.calculate = fsmc_read_hwecc_ecc4;
- nand->ecc.correct = fsmc_correct_data;
+ nand->ecc.correct = fsmc_bch8_correct_data;
nand->ecc.bytes = 13;
+ nand->ecc.strength = 8;
} else {
nand->ecc.calculate = fsmc_read_hwecc_ecc1;
nand->ecc.correct = nand_correct_data;
nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
}
/*
@@ -681,19 +1093,52 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (nand_scan_ident(&host->mtd, 1, NULL)) {
ret = -ENXIO;
dev_err(&pdev->dev, "No NAND Device found!\n");
- goto err_probe;
+ goto err_scan_ident;
}
if (AMBA_REV_BITS(host->pid) >= 8) {
- if (host->mtd.writesize == 512) {
- nand->ecc.layout = &fsmc_ecc4_sp_layout;
+ switch (host->mtd.oobsize) {
+ case 16:
+ nand->ecc.layout = &fsmc_ecc4_16_layout;
host->ecc_place = &fsmc_ecc4_sp_place;
- } else {
- nand->ecc.layout = &fsmc_ecc4_lp_layout;
+ break;
+ case 64:
+ nand->ecc.layout = &fsmc_ecc4_64_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ case 128:
+ nand->ecc.layout = &fsmc_ecc4_128_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ case 224:
+ nand->ecc.layout = &fsmc_ecc4_224_layout;
host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ case 256:
+ nand->ecc.layout = &fsmc_ecc4_256_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ default:
+ printk(KERN_WARNING "No oob scheme defined for "
+ "oobsize %d\n", mtd->oobsize);
+ BUG();
}
} else {
- nand->ecc.layout = &fsmc_ecc1_layout;
+ switch (host->mtd.oobsize) {
+ case 16:
+ nand->ecc.layout = &fsmc_ecc1_16_layout;
+ break;
+ case 64:
+ nand->ecc.layout = &fsmc_ecc1_64_layout;
+ break;
+ case 128:
+ nand->ecc.layout = &fsmc_ecc1_128_layout;
+ break;
+ default:
+ printk(KERN_WARNING "No oob scheme defined for "
+ "oobsize %d\n", mtd->oobsize);
+ BUG();
+ }
}
/* Second stage of scan to fill MTD data-structures */
@@ -713,13 +1158,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* Check for partition info passed
*/
host->mtd.name = "nand";
- ret = mtd_device_parse_register(&host->mtd, NULL, 0,
- host->mtd.size <= 0x04000000 ?
- partition_info_16KB_blk :
- partition_info_128KB_blk,
- host->mtd.size <= 0x04000000 ?
- ARRAY_SIZE(partition_info_16KB_blk) :
- ARRAY_SIZE(partition_info_128KB_blk));
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(&host->mtd, NULL, &ppdata,
+ host->partitions, host->nr_partitions);
if (ret)
goto err_probe;
@@ -728,32 +1169,16 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
return 0;
err_probe:
+err_scan_ident:
+ if (host->mode == USE_DMA_ACCESS)
+ dma_release_channel(host->write_dma_chan);
+err_req_write_chnl:
+ if (host->mode == USE_DMA_ACCESS)
+ dma_release_channel(host->read_dma_chan);
+err_req_read_chnl:
clk_disable(host->clk);
-err_probe1:
- if (host->clk)
- clk_put(host->clk);
- if (host->regs_va)
- iounmap(host->regs_va);
- if (host->resregs)
- release_mem_region(host->resregs->start,
- resource_size(host->resregs));
- if (host->cmd_va)
- iounmap(host->cmd_va);
- if (host->rescmd)
- release_mem_region(host->rescmd->start,
- resource_size(host->rescmd));
- if (host->addr_va)
- iounmap(host->addr_va);
- if (host->resaddr)
- release_mem_region(host->resaddr->start,
- resource_size(host->resaddr));
- if (host->data_va)
- iounmap(host->data_va);
- if (host->resdata)
- release_mem_region(host->resdata->start,
- resource_size(host->resdata));
-
- kfree(host);
+err_clk_enable:
+ clk_put(host->clk);
return ret;
}
@@ -768,24 +1193,15 @@ static int fsmc_nand_remove(struct platform_device *pdev)
if (host) {
nand_release(&host->mtd);
+
+ if (host->mode == USE_DMA_ACCESS) {
+ dma_release_channel(host->write_dma_chan);
+ dma_release_channel(host->read_dma_chan);
+ }
clk_disable(host->clk);
clk_put(host->clk);
-
- iounmap(host->regs_va);
- release_mem_region(host->resregs->start,
- resource_size(host->resregs));
- iounmap(host->cmd_va);
- release_mem_region(host->rescmd->start,
- resource_size(host->rescmd));
- iounmap(host->addr_va);
- release_mem_region(host->resaddr->start,
- resource_size(host->resaddr));
- iounmap(host->data_va);
- release_mem_region(host->resdata->start,
- resource_size(host->resdata));
-
- kfree(host);
}
+
return 0;
}
@@ -801,15 +1217,24 @@ static int fsmc_nand_suspend(struct device *dev)
static int fsmc_nand_resume(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
- if (host)
+ if (host) {
clk_enable(host->clk);
+ fsmc_nand_setup(host->regs_va, host->bank,
+ host->nand.options & NAND_BUSWIDTH_16,
+ host->dev_timings);
+ }
return 0;
}
-static const struct dev_pm_ops fsmc_nand_pm_ops = {
- .suspend = fsmc_nand_suspend,
- .resume = fsmc_nand_resume,
+static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id fsmc_nand_id_table[] = {
+ { .compatible = "st,spear600-fsmc-nand" },
+ {}
};
+MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
#endif
static struct platform_driver fsmc_nand_driver = {
@@ -817,6 +1242,7 @@ static struct platform_driver fsmc_nand_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "fsmc-nand",
+ .of_match_table = of_match_ptr(fsmc_nand_id_table),
#ifdef CONFIG_PM
.pm = &fsmc_nand_pm_ops,
#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 7db6555ed3ba..e8ea7107932e 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -835,7 +835,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
| BM_GPMI_CTRL0_ADDRESS_INCREMENT
| BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
pio[1] = pio[2] = 0;
- desc = channel->device->device_prep_slave_sg(channel,
+ desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
@@ -848,8 +848,10 @@ int gpmi_send_command(struct gpmi_nand_data *this)
sg_init_one(sgl, this->cmd_buffer, this->command_length);
dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
- desc = channel->device->device_prep_slave_sg(channel,
- sgl, 1, DMA_MEM_TO_DEV, 1);
+ desc = dmaengine_prep_slave_sg(channel,
+ sgl, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -880,8 +882,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
| BF_GPMI_CTRL0_ADDRESS(address)
| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
pio[1] = 0;
- desc = channel->device->device_prep_slave_sg(channel,
- (struct scatterlist *)pio,
+ desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
@@ -890,8 +891,9 @@ int gpmi_send_data(struct gpmi_nand_data *this)
/* [2] send DMA request */
prepare_data_dma(this, DMA_TO_DEVICE);
- desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
- 1, DMA_MEM_TO_DEV, 1);
+ desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
+ 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -916,7 +918,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
pio[1] = 0;
- desc = channel->device->device_prep_slave_sg(channel,
+ desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
@@ -926,8 +928,9 @@ int gpmi_read_data(struct gpmi_nand_data *this)
/* [2] : send DMA request */
prepare_data_dma(this, DMA_FROM_DEVICE);
- desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
- 1, DMA_DEV_TO_MEM, 1);
+ desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
+ 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -972,9 +975,10 @@ int gpmi_send_page(struct gpmi_nand_data *this,
pio[4] = payload;
pio[5] = auxiliary;
- desc = channel->device->device_prep_slave_sg(channel,
+ desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE,
+ DMA_CTRL_ACK);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -1007,7 +1011,7 @@ int gpmi_read_page(struct gpmi_nand_data *this,
| BF_GPMI_CTRL0_ADDRESS(address)
| BF_GPMI_CTRL0_XFER_COUNT(0);
pio[1] = 0;
- desc = channel->device->device_prep_slave_sg(channel,
+ desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio, 2,
DMA_TRANS_NONE, 0);
if (!desc) {
@@ -1036,9 +1040,10 @@ int gpmi_read_page(struct gpmi_nand_data *this,
pio[3] = geo->page_size;
pio[4] = payload;
pio[5] = auxiliary;
- desc = channel->device->device_prep_slave_sg(channel,
+ desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_TRANS_NONE, 1);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -1055,9 +1060,11 @@ int gpmi_read_page(struct gpmi_nand_data *this,
| BF_GPMI_CTRL0_ADDRESS(address)
| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
pio[1] = 0;
- desc = channel->device->device_prep_slave_sg(channel,
- (struct scatterlist *)pio, 2,
- DMA_TRANS_NONE, 1);
+ pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio, 3,
+ DMA_TRANS_NONE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
pr_err("step 3 error\n");
return -1;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 493ec2fcf97f..75b1dde16358 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1124,7 +1124,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
/* Do we have a flash based bad block table ? */
- if (chip->options & NAND_BBT_USE_FLASH)
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
ret = nand_update_bbt(mtd, ofs);
else {
chipnr = (int)(ofs >> chip->chip_shift);
@@ -1155,7 +1155,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
return ret;
}
-static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this)
+static int nand_boot_set_geometry(struct gpmi_nand_data *this)
{
struct boot_rom_geometry *geometry = &this->rom_geometry;
@@ -1182,7 +1182,7 @@ static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this)
}
static const char *fingerprint = "STMP";
-static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this)
+static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
{
struct boot_rom_geometry *rom_geo = &this->rom_geometry;
struct device *dev = this->dev;
@@ -1239,7 +1239,7 @@ static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this)
}
/* Writes a transcription stamp. */
-static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this)
+static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
struct device *dev = this->dev;
struct boot_rom_geometry *rom_geo = &this->rom_geometry;
@@ -1322,7 +1322,7 @@ static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this)
return 0;
}
-static int __devinit mx23_boot_init(struct gpmi_nand_data *this)
+static int mx23_boot_init(struct gpmi_nand_data *this)
{
struct device *dev = this->dev;
struct nand_chip *chip = &this->nand;
@@ -1391,7 +1391,7 @@ static int __devinit mx23_boot_init(struct gpmi_nand_data *this)
return 0;
}
-static int __devinit nand_boot_init(struct gpmi_nand_data *this)
+static int nand_boot_init(struct gpmi_nand_data *this)
{
nand_boot_set_geometry(this);
@@ -1401,7 +1401,7 @@ static int __devinit nand_boot_init(struct gpmi_nand_data *this)
return 0;
}
-static int __devinit gpmi_set_geometry(struct gpmi_nand_data *this)
+static int gpmi_set_geometry(struct gpmi_nand_data *this)
{
int ret;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index e023bccb7781..ec6180d4ff8f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -20,7 +20,7 @@
#include <linux/mtd/nand.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <mach/dma.h>
+#include <linux/fsl/mxs-dma.h>
struct resources {
void *gpmi_regs;
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 5dc6f0d92f1a..11e487813428 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -135,8 +135,8 @@ static int __init h1910_init(void)
}
/* Register the partitions */
- mtd_device_parse_register(h1910_nand_mtd, NULL, 0,
- partition_info, NUM_PARTITIONS);
+ mtd_device_parse_register(h1910_nand_mtd, NULL, NULL, partition_info,
+ NUM_PARTITIONS);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index ac3b9f255e00..e4147e8acb7c 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -332,6 +332,11 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
chip->ecc.mode = NAND_ECC_HW_OOB_FIRST;
chip->ecc.size = 512;
chip->ecc.bytes = 9;
+ chip->ecc.strength = 2;
+ /*
+ * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a
+ * conservative guess, given 9 ecc bytes and reed-solomon alg.
+ */
if (pdata)
chip->ecc.layout = pdata->ecc_layout;
@@ -367,9 +372,9 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
goto err_gpio_free;
}
- ret = mtd_device_parse_register(mtd, NULL, 0,
- pdata ? pdata->partitions : NULL,
- pdata ? pdata->num_partitions : 0);
+ ret = mtd_device_parse_register(mtd, NULL, NULL,
+ pdata ? pdata->partitions : NULL,
+ pdata ? pdata->num_partitions : 0);
if (ret) {
dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 74a43b818d0e..cc0678a967c1 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1225,9 +1225,16 @@ static int __init mxcnd_probe(struct platform_device *pdev)
goto escan;
}
+ if (this->ecc.mode == NAND_ECC_HW) {
+ if (nfc_is_v1())
+ this->ecc.strength = 1;
+ else
+ this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
+ }
+
/* Register the partitions */
- mtd_device_parse_register(mtd, part_probes, 0,
- pdata->parts, pdata->nr_parts);
+ mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts,
+ pdata->nr_parts);
platform_set_drvdata(pdev, host);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8a393f9e6027..47b19c0bb070 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -123,12 +123,6 @@ static int check_offs_len(struct mtd_info *mtd,
ret = -EINVAL;
}
- /* Do not allow past end of device */
- if (ofs + len > mtd->size) {
- pr_debug("%s: past end of device\n", __func__);
- ret = -EINVAL;
- }
-
return ret;
}
@@ -338,7 +332,7 @@ static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
*/
static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
- int page, chipnr, res = 0;
+ int page, chipnr, res = 0, i = 0;
struct nand_chip *chip = mtd->priv;
u16 bad;
@@ -356,23 +350,29 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
chip->select_chip(mtd, chipnr);
}
- if (chip->options & NAND_BUSWIDTH_16) {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE,
- page);
- bad = cpu_to_le16(chip->read_word(mtd));
- if (chip->badblockpos & 0x1)
- bad >>= 8;
- else
- bad &= 0xFF;
- } else {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
- bad = chip->read_byte(mtd);
- }
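+	/*
+	 * With NAND_BBT_SCAN2NDPAGE the bad block marker is checked in the
+	 * first two pages of the block; the loop stops as soon as one of
+	 * them reports the block bad.
+	 */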
+ do {
+ if (chip->options & NAND_BUSWIDTH_16) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB,
+ chip->badblockpos & 0xFE, page);
+ bad = cpu_to_le16(chip->read_word(mtd));
+ if (chip->badblockpos & 0x1)
+ bad >>= 8;
+ else
+ bad &= 0xFF;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
+ page);
+ bad = chip->read_byte(mtd);
+ }
- if (likely(chip->badblockbits == 8))
- res = bad != 0xFF;
- else
- res = hweight8(bad) < chip->badblockbits;
+ if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;
+ ofs += mtd->writesize;
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ i++;
+ } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
if (getchip)
nand_release_device(mtd);
@@ -386,51 +386,79 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
* @ofs: offset from device start
*
* This is the default implementation, which can be overridden by a hardware
- * specific driver.
+ * specific driver. We try operations in the following order, according to our
+ * bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH):
+ * (1) erase the affected block, to allow OOB marker to be written cleanly
+ * (2) update in-memory BBT
+ * (3) write bad block marker to OOB area of affected block
+ * (4) update flash-based BBT
+ * Note that we retain the first error encountered in (3) or (4), finish the
+ * remaining procedures, and return that first error at the end.
*/
static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct nand_chip *chip = mtd->priv;
uint8_t buf[2] = { 0, 0 };
- int block, ret, i = 0;
+ int block, res, ret = 0, i = 0;
+ int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
- if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
- ofs += mtd->erasesize - mtd->writesize;
+ if (write_oob) {
+ struct erase_info einfo;
+
+ /* Attempt erase before marking OOB */
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = ofs;
+ einfo.len = 1 << chip->phys_erase_shift;
+ nand_erase_nand(mtd, &einfo, 0);
+ }
/* Get block number */
block = (int)(ofs >> chip->bbt_erase_shift);
+ /* Mark block bad in memory-based BBT */
if (chip->bbt)
chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
- /* Do we have a flash based bad block table? */
- if (chip->bbt_options & NAND_BBT_USE_FLASH)
- ret = nand_update_bbt(mtd, ofs);
- else {
+ /* Write bad block marker to OOB */
+ if (write_oob) {
struct mtd_oob_ops ops;
+ loff_t wr_ofs = ofs;
nand_get_device(chip, mtd, FL_WRITING);
- /*
- * Write to first two pages if necessary. If we write to more
- * than one location, the first error encountered quits the
- * procedure. We write two bytes per location, so we dont have
- * to mess with 16 bit access.
- */
- ops.len = ops.ooblen = 2;
ops.datbuf = NULL;
ops.oobbuf = buf;
- ops.ooboffs = chip->badblockpos & ~0x01;
+ ops.ooboffs = chip->badblockpos;
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ops.ooboffs &= ~0x01;
+ ops.len = ops.ooblen = 2;
+ } else {
+ ops.len = ops.ooblen = 1;
+ }
ops.mode = MTD_OPS_PLACE_OOB;
+
+ /* Write to first/last page(s) if necessary */
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+ wr_ofs += mtd->erasesize - mtd->writesize;
do {
- ret = nand_do_write_oob(mtd, ofs, &ops);
+ res = nand_do_write_oob(mtd, wr_ofs, &ops);
+ if (!ret)
+ ret = res;
i++;
- ofs += mtd->writesize;
- } while (!ret && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) &&
- i < 2);
+ wr_ofs += mtd->writesize;
+ } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
nand_release_device(mtd);
}
+
+ /* Update flash-based bad block table */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ res = nand_update_bbt(mtd, ofs);
+ if (!ret)
+ ret = res;
+ }
+
if (!ret)
mtd->ecc_stats.badblocks++;
@@ -1586,25 +1614,14 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
struct mtd_oob_ops ops;
int ret;
- /* Do not allow reads past end of device */
- if ((from + len) > mtd->size)
- return -EINVAL;
- if (!len)
- return 0;
-
nand_get_device(chip, mtd, FL_READING);
-
ops.len = len;
ops.datbuf = buf;
ops.oobbuf = NULL;
ops.mode = 0;
-
ret = nand_do_read_ops(mtd, from, &ops);
-
*retlen = ops.retlen;
-
nand_release_device(mtd);
-
return ret;
}
@@ -2293,12 +2310,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
struct mtd_oob_ops ops;
int ret;
- /* Do not allow reads past end of device */
- if ((to + len) > mtd->size)
- return -EINVAL;
- if (!len)
- return 0;
-
/* Wait for the device to get ready */
panic_nand_wait(mtd, chip, 400);
@@ -2333,25 +2344,14 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
struct mtd_oob_ops ops;
int ret;
- /* Do not allow reads past end of device */
- if ((to + len) > mtd->size)
- return -EINVAL;
- if (!len)
- return 0;
-
nand_get_device(chip, mtd, FL_WRITING);
-
ops.len = len;
ops.datbuf = (uint8_t *)buf;
ops.oobbuf = NULL;
ops.mode = 0;
-
ret = nand_do_write_ops(mtd, to, &ops);
-
*retlen = ops.retlen;
-
nand_release_device(mtd);
-
return ret;
}
@@ -2550,8 +2550,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
if (check_offs_len(mtd, instr->addr, instr->len))
return -EINVAL;
- instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-
/* Grab the lock and see if the device is available */
nand_get_device(chip, mtd, FL_ERASING);
@@ -2715,10 +2713,6 @@ static void nand_sync(struct mtd_info *mtd)
*/
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
- /* Check for invalid offset */
- if (offs > mtd->size)
- return -EINVAL;
-
return nand_block_checkbad(mtd, offs, 1, 0);
}
@@ -2857,7 +2851,6 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
return 0;
- pr_info("ONFI flash detected\n");
chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
for (i = 0; i < 3; i++) {
chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
@@ -2898,7 +2891,8 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
mtd->writesize = le32_to_cpu(p->byte_per_page);
mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
- chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
+ chip->chipsize = le32_to_cpu(p->blocks_per_lun);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
*busw = 0;
if (le16_to_cpu(p->features) & 1)
*busw = NAND_BUSWIDTH_16;
@@ -2907,6 +2901,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
chip->options |= (NAND_NO_READRDY |
NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
+ pr_info("ONFI flash detected\n");
return 1;
}
@@ -3238,6 +3233,10 @@ int nand_scan_tail(struct mtd_info *mtd)
int i;
struct nand_chip *chip = mtd->priv;
+ /* New bad blocks should be marked in OOB, flash-based BBT, or both */
+ BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+ !(chip->bbt_options & NAND_BBT_USE_FLASH));
+
if (!(chip->options & NAND_OWN_BUFFERS))
chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
if (!chip->buffers)
@@ -3350,6 +3349,7 @@ int nand_scan_tail(struct mtd_info *mtd)
if (!chip->ecc.size)
chip->ecc.size = 256;
chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
break;
case NAND_ECC_SOFT_BCH:
@@ -3384,6 +3384,8 @@ int nand_scan_tail(struct mtd_info *mtd)
pr_warn("BCH ECC initialization failed!\n");
BUG();
}
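+		/*
+		 * Each correctable bit error in a BCH code over ecc.size bytes
+		 * costs roughly fls(8 * ecc.size) parity bits, so dividing the
+		 * available parity bits (ecc.bytes * 8) by that estimates the
+		 * per-step strength.
+		 */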
+ chip->ecc.strength =
+ chip->ecc.bytes*8 / fls(8*chip->ecc.size);
break;
case NAND_ECC_NONE:
@@ -3397,6 +3399,7 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.write_oob = nand_write_oob_std;
chip->ecc.size = mtd->writesize;
chip->ecc.bytes = 0;
+ chip->ecc.strength = 0;
break;
default:
@@ -3461,25 +3464,26 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
MTD_CAP_NANDFLASH;
- mtd->erase = nand_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
- mtd->read = nand_read;
- mtd->write = nand_write;
- mtd->panic_write = panic_nand_write;
- mtd->read_oob = nand_read_oob;
- mtd->write_oob = nand_write_oob;
- mtd->sync = nand_sync;
- mtd->lock = NULL;
- mtd->unlock = NULL;
- mtd->suspend = nand_suspend;
- mtd->resume = nand_resume;
- mtd->block_isbad = nand_block_isbad;
- mtd->block_markbad = nand_block_markbad;
+ mtd->_erase = nand_erase;
+ mtd->_point = NULL;
+ mtd->_unpoint = NULL;
+ mtd->_read = nand_read;
+ mtd->_write = nand_write;
+ mtd->_panic_write = panic_nand_write;
+ mtd->_read_oob = nand_read_oob;
+ mtd->_write_oob = nand_write_oob;
+ mtd->_sync = nand_sync;
+ mtd->_lock = NULL;
+ mtd->_unlock = NULL;
+ mtd->_suspend = nand_suspend;
+ mtd->_resume = nand_resume;
+ mtd->_block_isbad = nand_block_isbad;
+ mtd->_block_markbad = nand_block_markbad;
mtd->writebufsize = mtd->writesize;
- /* propagate ecc.layout to mtd_info */
+ /* propagate ecc info to mtd_info */
mtd->ecclayout = chip->ecc.layout;
+ mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps;
/* Check, if we should skip the bad block table scan */
if (chip->options & NAND_SKIP_BBTSCAN)
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ec688548c880..2b6f632cf274 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -179,6 +179,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = 256;
chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
chip->priv = ndfc;
ndfc->mtd.priv = chip;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index b3a883e2a22f..c2b0bba9d8b3 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1058,6 +1058,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
(pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
info->nand.ecc.bytes = 3;
info->nand.ecc.size = 512;
+ info->nand.ecc.strength = 1;
info->nand.ecc.calculate = omap_calculate_ecc;
info->nand.ecc.hwctl = omap_enable_hwecc;
info->nand.ecc.correct = omap_correct_data;
@@ -1101,8 +1102,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
goto out_release_mem_region;
}
- mtd_device_parse_register(&info->mtd, NULL, 0,
- pdata->parts, pdata->nr_parts);
+ mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
+ pdata->nr_parts);
platform_set_drvdata(pdev, &info->mtd);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 29f505adaf84..1d3bfb26080c 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -129,8 +129,8 @@ static int __init orion_nand_probe(struct platform_device *pdev)
}
mtd->name = "orion_nand";
- ret = mtd_device_parse_register(mtd, NULL, 0,
- board->parts, board->nr_parts);
+ ret = mtd_device_parse_register(mtd, NULL, NULL, board->parts,
+ board->nr_parts);
if (ret) {
nand_release(mtd);
goto no_dev;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 7f2da6953357..6404e6e81b10 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -99,8 +99,9 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
}
err = mtd_device_parse_register(&data->mtd,
- pdata->chip.part_probe_types, 0,
- pdata->chip.partitions, pdata->chip.nr_partitions);
+ pdata->chip.part_probe_types, NULL,
+ pdata->chip.partitions,
+ pdata->chip.nr_partitions);
if (!err)
return err;
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 7e52af51a198..0ddd90e5788f 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -275,11 +275,10 @@ static int __init ppchameleonevb_init(void)
ppchameleon_mtd->name = "ppchameleon-nand";
/* Register the partitions */
- mtd_device_parse_register(ppchameleon_mtd, NULL, 0,
- ppchameleon_mtd->size == NAND_SMALL_SIZE ?
- partition_info_me :
- partition_info_hi,
- NUM_PARTITIONS);
+ mtd_device_parse_register(ppchameleon_mtd, NULL, NULL,
+ ppchameleon_mtd->size == NAND_SMALL_SIZE ?
+ partition_info_me : partition_info_hi,
+ NUM_PARTITIONS);
nand_evb_init:
/****************************
@@ -365,11 +364,10 @@ static int __init ppchameleonevb_init(void)
ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
/* Register the partitions */
- mtd_device_parse_register(ppchameleonevb_mtd, NULL, 0,
- ppchameleon_mtd->size == NAND_SMALL_SIZE ?
- partition_info_me :
- partition_info_hi,
- NUM_PARTITIONS);
+ mtd_device_parse_register(ppchameleonevb_mtd, NULL, NULL,
+ ppchameleon_mtd->size == NAND_SMALL_SIZE ?
+ partition_info_me : partition_info_hi,
+ NUM_PARTITIONS);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5c3d719c37e6..def50caa6f84 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1002,6 +1002,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
KEEP_CONFIG:
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = host->page_size;
+ chip->ecc.strength = 1;
chip->options = NAND_NO_AUTOINCR;
chip->options |= NAND_NO_READRDY;
@@ -1228,8 +1229,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
continue;
}
- ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 0,
- pdata->parts[cs], pdata->nr_parts[cs]);
+ ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
+ NULL, pdata->parts[cs],
+ pdata->nr_parts[cs]);
if (!ret)
probe_success = 1;
}
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 769a4e096b3c..c2040187c813 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -891,6 +891,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
chip->ecc.size = R852_DMA_LEN;
chip->ecc.bytes = SM_OOB_SIZE;
+ chip->ecc.strength = 2;
chip->ecc.hwctl = r852_ecc_hwctl;
chip->ecc.calculate = r852_ecc_calculate;
chip->ecc.correct = r852_ecc_correct;
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index f309addc2fa0..e55b5cfbe145 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -527,6 +527,7 @@ static int __init rtc_from4_init(void)
this->ecc.mode = NAND_ECC_HW_SYNDROME;
this->ecc.size = 512;
this->ecc.bytes = 8;
+ this->ecc.strength = 3;
/* return the status of extra status and ECC checks */
this->errstat = rtc_from4_errstat;
/* set the nand_oobinfo to support FPGA H/W error detection */
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 868685db6712..91121f33f743 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -751,8 +751,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
if (set)
mtd->mtd.name = set->name;
- return mtd_device_parse_register(&mtd->mtd, NULL, 0,
- set->partitions, set->nr_partitions);
+ return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
+ set->partitions, set->nr_partitions);
}
/**
@@ -823,6 +823,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
chip->ecc.calculate = s3c2410_nand_calculate_ecc;
chip->ecc.correct = s3c2410_nand_correct_data;
chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.strength = 1;
switch (info->cpu_type) {
case TYPE_S3C2410:
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 93b1f74321c2..e9b2b260de3a 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
@@ -283,7 +284,7 @@ static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- uint32_t flcmncr_val = readl(FLCMNCR(flctl)) & ~SEL_16BIT;
+ uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
uint32_t flcmdcr_val, addr_len_bytes = 0;
/* Set SNAND bit if page size is 2048byte */
@@ -303,6 +304,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
break;
case NAND_CMD_READ0:
case NAND_CMD_READOOB:
+ case NAND_CMD_RNDOUT:
addr_len_bytes = flctl->rw_ADRCNT;
flcmdcr_val |= CDSRC_E;
if (flctl->chip.options & NAND_BUSWIDTH_16)
@@ -320,6 +322,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
break;
case NAND_CMD_READID:
flcmncr_val &= ~SNAND_E;
+ flcmdcr_val |= CDSRC_E;
addr_len_bytes = ADRCNT_1;
break;
case NAND_CMD_STATUS:
@@ -513,6 +516,8 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
struct sh_flctl *flctl = mtd_to_flctl(mtd);
uint32_t read_cmd = 0;
+ pm_runtime_get_sync(&flctl->pdev->dev);
+
flctl->read_bytes = 0;
if (command != NAND_CMD_PAGEPROG)
flctl->index = 0;
@@ -525,7 +530,6 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
execmd_read_page_sector(mtd, page_addr);
break;
}
- empty_fifo(flctl);
if (flctl->page_size)
set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
| command);
@@ -547,7 +551,6 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
break;
}
- empty_fifo(flctl);
if (flctl->page_size) {
set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
| NAND_CMD_READ0);
@@ -559,15 +562,35 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
flctl->read_bytes = mtd->oobsize;
goto read_normal_exit;
+ case NAND_CMD_RNDOUT:
+ if (flctl->hwecc)
+ break;
+
+ if (flctl->page_size)
+ set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
+ | command);
+ else
+ set_cmd_regs(mtd, command, command);
+
+ set_addr(mtd, column, 0);
+
+ flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
+ goto read_normal_exit;
+
case NAND_CMD_READID:
- empty_fifo(flctl);
set_cmd_regs(mtd, command, command);
- set_addr(mtd, 0, 0);
- flctl->read_bytes = 4;
+ /* READID is always performed using an 8-bit bus */
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column <<= 1;
+ set_addr(mtd, column, 0);
+
+ flctl->read_bytes = 8;
writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ empty_fifo(flctl);
start_translation(flctl);
- read_datareg(flctl, 0); /* read and end */
+ read_fiforeg(flctl, flctl->read_bytes, 0);
+ wait_completion(flctl);
break;
case NAND_CMD_ERASE1:
@@ -650,29 +673,55 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
default:
break;
}
- return;
+ goto runtime_exit;
read_normal_exit:
writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ empty_fifo(flctl);
start_translation(flctl);
read_fiforeg(flctl, flctl->read_bytes, 0);
wait_completion(flctl);
+runtime_exit:
+ pm_runtime_put_sync(&flctl->pdev->dev);
return;
}
static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- uint32_t flcmncr_val = readl(FLCMNCR(flctl));
+ int ret;
switch (chipnr) {
case -1:
- flcmncr_val &= ~CE0_ENABLE;
- writel(flcmncr_val, FLCMNCR(flctl));
+ flctl->flcmncr_base &= ~CE0_ENABLE;
+
+ pm_runtime_get_sync(&flctl->pdev->dev);
+ writel(flctl->flcmncr_base, FLCMNCR(flctl));
+
+ if (flctl->qos_request) {
+ dev_pm_qos_remove_request(&flctl->pm_qos);
+ flctl->qos_request = 0;
+ }
+
+ pm_runtime_put_sync(&flctl->pdev->dev);
break;
case 0:
- flcmncr_val |= CE0_ENABLE;
- writel(flcmncr_val, FLCMNCR(flctl));
+ flctl->flcmncr_base |= CE0_ENABLE;
+
+ if (!flctl->qos_request) {
+ ret = dev_pm_qos_add_request(&flctl->pdev->dev,
+ &flctl->pm_qos, 100);
+ if (ret < 0)
+ dev_err(&flctl->pdev->dev,
+ "PM QoS request failed: %d\n", ret);
+ flctl->qos_request = 1;
+ }
+
+ if (flctl->holden) {
+ pm_runtime_get_sync(&flctl->pdev->dev);
+ writel(HOLDEN, FLHOLDCR(flctl));
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ }
break;
default:
BUG();
@@ -730,11 +779,6 @@ static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
return 0;
}
-static void flctl_register_init(struct sh_flctl *flctl, unsigned long val)
-{
- writel(val, FLCMNCR(flctl));
-}
-
static int flctl_chip_init_tail(struct mtd_info *mtd)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -781,13 +825,13 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
chip->ecc.size = 512;
chip->ecc.bytes = 10;
+ chip->ecc.strength = 4;
chip->ecc.read_page = flctl_read_page_hwecc;
chip->ecc.write_page = flctl_write_page_hwecc;
chip->ecc.mode = NAND_ECC_HW;
/* 4 symbols ECC enabled */
- writel(readl(FLCMNCR(flctl)) | _4ECCEN | ECCPOS2 | ECCPOS_02,
- FLCMNCR(flctl));
+ flctl->flcmncr_base |= _4ECCEN | ECCPOS2 | ECCPOS_02;
} else {
chip->ecc.mode = NAND_ECC_SOFT;
}
@@ -819,13 +863,13 @@ static int __devinit flctl_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get I/O memory\n");
- goto err;
+ goto err_iomap;
}
flctl->reg = ioremap(res->start, resource_size(res));
if (flctl->reg == NULL) {
dev_err(&pdev->dev, "failed to remap I/O memory\n");
- goto err;
+ goto err_iomap;
}
platform_set_drvdata(pdev, flctl);
@@ -833,9 +877,9 @@ static int __devinit flctl_probe(struct platform_device *pdev)
nand = &flctl->chip;
flctl_mtd->priv = nand;
flctl->pdev = pdev;
+ flctl->flcmncr_base = pdata->flcmncr_val;
flctl->hwecc = pdata->has_hwecc;
-
- flctl_register_init(flctl, pdata->flcmncr_val);
+ flctl->holden = pdata->use_holden;
nand->options = NAND_NO_AUTOINCR;
@@ -855,23 +899,28 @@ static int __devinit flctl_probe(struct platform_device *pdev)
nand->read_word = flctl_read_word;
}
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
ret = nand_scan_ident(flctl_mtd, 1, NULL);
if (ret)
- goto err;
+ goto err_chip;
ret = flctl_chip_init_tail(flctl_mtd);
if (ret)
- goto err;
+ goto err_chip;
ret = nand_scan_tail(flctl_mtd);
if (ret)
- goto err;
+ goto err_chip;
mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
return 0;
-err:
+err_chip:
+ pm_runtime_disable(&pdev->dev);
+err_iomap:
kfree(flctl);
return ret;
}
@@ -881,6 +930,7 @@ static int __devexit flctl_remove(struct platform_device *pdev)
struct sh_flctl *flctl = platform_get_drvdata(pdev);
nand_release(&flctl->mtd);
+ pm_runtime_disable(&pdev->dev);
kfree(flctl);
return 0;
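A minimal sketch (not taken from the driver) of the runtime-PM bracketing pattern the flctl_cmdfunc() hunks above introduce: take a reference before touching the controller and release it on every exit path through a single label. The function name and command value below are hypothetical.

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static void example_cmdfunc(struct platform_device *pdev, unsigned int command)
    {
            pm_runtime_get_sync(&pdev->dev);        /* power the controller up */

            switch (command) {
            case 0x00:                              /* hypothetical "read" command */
                    /* program registers, start the transfer, wait for completion */
                    break;
            default:
                    goto out;                       /* nothing to do for other commands */
            }
    out:
            pm_runtime_put_sync(&pdev->dev);        /* allow runtime suspend again */
    }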
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index b175c0fd8b93..3421e3762a5a 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -167,6 +167,7 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
this->ecc.mode = NAND_ECC_HW;
this->ecc.size = 256;
this->ecc.bytes = 3;
+ this->ecc.strength = 1;
this->badblock_pattern = data->badblock_pattern;
this->ecc.layout = data->ecc_layout;
this->ecc.hwctl = sharpsl_nand_enable_hwecc;
@@ -181,8 +182,8 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
/* Register the partitions */
sharpsl->mtd.name = "sharpsl-nand";
- err = mtd_device_parse_register(&sharpsl->mtd, NULL, 0,
- data->partitions, data->nr_partitions);
+ err = mtd_device_parse_register(&sharpsl->mtd, NULL, NULL,
+ data->partitions, data->nr_partitions);
if (err)
goto err_add;
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 6caa0cd9d6a7..5aa518081c51 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -430,6 +430,7 @@ static int tmio_probe(struct platform_device *dev)
nand_chip->ecc.mode = NAND_ECC_HW;
nand_chip->ecc.size = 512;
nand_chip->ecc.bytes = 6;
+ nand_chip->ecc.strength = 2;
nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
nand_chip->ecc.correct = tmio_nand_correct_data;
@@ -456,9 +457,9 @@ static int tmio_probe(struct platform_device *dev)
goto err_scan;
}
/* Register the partitions */
- retval = mtd_device_parse_register(mtd, NULL, 0,
- data ? data->partition : NULL,
- data ? data->num_partitions : 0);
+ retval = mtd_device_parse_register(mtd, NULL, NULL,
+ data ? data->partition : NULL,
+ data ? data->num_partitions : 0);
if (!retval)
return retval;
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index c7c4f1d11c77..26398dcf21cf 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -356,6 +356,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
/* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
chip->ecc.size = 256;
chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
chip->chip_delay = 100;
chip->controller = &drvdata->hw_control;
@@ -386,7 +387,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
}
mtd->name = txx9_priv->mtdname;
- mtd_device_parse_register(mtd, NULL, 0, NULL, 0);
+ mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
drvdata->mtds[i] = mtd;
}
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index a75382aff5f6..c5f4ebf4b384 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -56,13 +56,6 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
if (memcmp(mtd->name, "DiskOnChip", 10))
return;
- if (!mtd_can_have_bb(mtd)) {
- printk(KERN_ERR
-"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
-"Please use the new diskonchip driver under the NAND subsystem.\n");
- return;
- }
-
pr_debug("NFTL: add_mtd for %s\n", mtd->name);
nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 0ccd5bff2544..1c4f97c63e62 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -70,9 +70,9 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
goto out_iounmap;
}
- err = mtd_device_parse_register(&info->mtd, NULL, 0,
- pdata ? pdata->parts : NULL,
- pdata ? pdata->nr_parts : 0);
+ err = mtd_device_parse_register(&info->mtd, NULL, NULL,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
platform_set_drvdata(pdev, info);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 7e9ea6852b67..398a82783848 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -751,9 +751,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
- r = mtd_device_parse_register(&c->mtd, NULL, 0,
- pdata ? pdata->parts : NULL,
- pdata ? pdata->nr_parts : 0);
+ r = mtd_device_parse_register(&c->mtd, NULL, NULL,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
if (r)
goto err_release_onenand;
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index a061bc163da2..b3ce12ef359e 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1753,16 +1753,6 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
(int)len);
- /* Initialize retlen, in case of early exit */
- *retlen = 0;
-
- /* Do not allow writes past end of device */
- if (unlikely((to + len) > mtd->size)) {
- printk(KERN_ERR "%s: Attempt write to past end of device\n",
- __func__);
- return -EINVAL;
- }
-
/* Reject writes, which are not page aligned */
if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
@@ -1890,13 +1880,6 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
ops->retlen = 0;
ops->oobretlen = 0;
- /* Do not allow writes past end of device */
- if (unlikely((to + len) > mtd->size)) {
- printk(KERN_ERR "%s: Attempt write to past end of device\n",
- __func__);
- return -EINVAL;
- }
-
/* Reject writes, which are not page aligned */
if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
@@ -2493,12 +2476,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
(unsigned long long)instr->addr,
(unsigned long long)instr->len);
- /* Do not allow erase past end of device */
- if (unlikely((len + addr) > mtd->size)) {
- printk(KERN_ERR "%s: Erase past end of device\n", __func__);
- return -EINVAL;
- }
-
if (FLEXONENAND(this)) {
/* Find the eraseregion of this address */
int i = flexonenand_region(mtd, addr);
@@ -2525,8 +2502,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
return -EINVAL;
}
- instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_ERASING);
@@ -4103,33 +4078,34 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->oobavail = this->ecclayout->oobavail;
mtd->ecclayout = this->ecclayout;
+ mtd->ecc_strength = 1;
/* Fill in remaining MTD driver data */
mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->erase = onenand_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
- mtd->read = onenand_read;
- mtd->write = onenand_write;
- mtd->read_oob = onenand_read_oob;
- mtd->write_oob = onenand_write_oob;
- mtd->panic_write = onenand_panic_write;
+ mtd->_erase = onenand_erase;
+ mtd->_point = NULL;
+ mtd->_unpoint = NULL;
+ mtd->_read = onenand_read;
+ mtd->_write = onenand_write;
+ mtd->_read_oob = onenand_read_oob;
+ mtd->_write_oob = onenand_write_oob;
+ mtd->_panic_write = onenand_panic_write;
#ifdef CONFIG_MTD_ONENAND_OTP
- mtd->get_fact_prot_info = onenand_get_fact_prot_info;
- mtd->read_fact_prot_reg = onenand_read_fact_prot_reg;
- mtd->get_user_prot_info = onenand_get_user_prot_info;
- mtd->read_user_prot_reg = onenand_read_user_prot_reg;
- mtd->write_user_prot_reg = onenand_write_user_prot_reg;
- mtd->lock_user_prot_reg = onenand_lock_user_prot_reg;
+ mtd->_get_fact_prot_info = onenand_get_fact_prot_info;
+ mtd->_read_fact_prot_reg = onenand_read_fact_prot_reg;
+ mtd->_get_user_prot_info = onenand_get_user_prot_info;
+ mtd->_read_user_prot_reg = onenand_read_user_prot_reg;
+ mtd->_write_user_prot_reg = onenand_write_user_prot_reg;
+ mtd->_lock_user_prot_reg = onenand_lock_user_prot_reg;
#endif
- mtd->sync = onenand_sync;
- mtd->lock = onenand_lock;
- mtd->unlock = onenand_unlock;
- mtd->suspend = onenand_suspend;
- mtd->resume = onenand_resume;
- mtd->block_isbad = onenand_block_isbad;
- mtd->block_markbad = onenand_block_markbad;
+ mtd->_sync = onenand_sync;
+ mtd->_lock = onenand_lock;
+ mtd->_unlock = onenand_unlock;
+ mtd->_suspend = onenand_suspend;
+ mtd->_resume = onenand_resume;
+ mtd->_block_isbad = onenand_block_isbad;
+ mtd->_block_markbad = onenand_block_markbad;
mtd->owner = THIS_MODULE;
mtd->writebufsize = mtd->writesize;
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index fa1ee43f735b..8e4b3f2742ba 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -923,7 +923,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!r) {
dev_err(&pdev->dev, "no buffer memory resource defined\n");
- return -ENOENT;
+ err = -ENOENT;
goto ahb_resource_failed;
}
@@ -964,7 +964,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!r) {
dev_err(&pdev->dev, "no dma memory resource defined\n");
- return -ENOENT;
+ err = -ENOENT;
goto dma_resource_failed;
}
@@ -1014,7 +1014,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
- err = mtd_device_parse_register(mtd, NULL, 0,
+ err = mtd_device_parse_register(mtd, NULL, NULL,
pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 48970c14beff..580035c803d6 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -78,8 +78,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
if ( directory < 0 ) {
offset = master->size + directory * master->erasesize;
- while (mtd_can_have_bb(master) &&
- mtd_block_isbad(master, offset)) {
+ while (mtd_block_isbad(master, offset)) {
if (!offset) {
nogood:
printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
@@ -89,8 +88,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
}
} else {
offset = directory * master->erasesize;
- while (mtd_can_have_bb(master) &&
- mtd_block_isbad(master, offset)) {
+ while (mtd_block_isbad(master, offset)) {
offset += master->erasesize;
if (offset == master->size)
goto nogood;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 072ed5970e2f..9e2dfd517aa5 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1256,7 +1256,7 @@ static void sm_remove_dev(struct mtd_blktrans_dev *dev)
static struct mtd_blktrans_ops sm_ftl_ops = {
.name = "smblk",
- .major = -1,
+ .major = 0,
.part_bits = SM_FTL_PARTN_BITS,
.blksize = SM_SECTOR_SIZE,
.getgeo = sm_getgeo,
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 941bc3c05d6e..90b98822d9a4 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -174,11 +174,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
int err = 0, lnum, offs, total_read;
struct gluebi_device *gluebi;
- if (len < 0 || from < 0 || from + len > mtd->size)
- return -EINVAL;
-
gluebi = container_of(mtd, struct gluebi_device, mtd);
-
lnum = div_u64_rem(from, mtd->erasesize, &offs);
total_read = len;
while (total_read) {
@@ -218,14 +214,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
int err = 0, lnum, offs, total_written;
struct gluebi_device *gluebi;
- if (len < 0 || to < 0 || len + to > mtd->size)
- return -EINVAL;
-
gluebi = container_of(mtd, struct gluebi_device, mtd);
-
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
-
lnum = div_u64_rem(to, mtd->erasesize, &offs);
if (len % mtd->writesize || offs % mtd->writesize)
@@ -265,21 +254,13 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
int err, i, lnum, count;
struct gluebi_device *gluebi;
- if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
- return -EINVAL;
- if (instr->len < 0 || instr->addr + instr->len > mtd->size)
- return -EINVAL;
if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
return -EINVAL;
lnum = mtd_div_by_eb(instr->addr, mtd);
count = mtd_div_by_eb(instr->len, mtd);
-
gluebi = container_of(mtd, struct gluebi_device, mtd);
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
-
for (i = 0; i < count - 1; i++) {
err = ubi_leb_unmap(gluebi->desc, lnum + i);
if (err)
@@ -340,11 +321,11 @@ static int gluebi_create(struct ubi_device_info *di,
mtd->owner = THIS_MODULE;
mtd->writesize = di->min_io_size;
mtd->erasesize = vi->usable_leb_size;
- mtd->read = gluebi_read;
- mtd->write = gluebi_write;
- mtd->erase = gluebi_erase;
- mtd->get_device = gluebi_get_device;
- mtd->put_device = gluebi_put_device;
+ mtd->_read = gluebi_read;
+ mtd->_write = gluebi_write;
+ mtd->_erase = gluebi_erase;
+ mtd->_get_device = gluebi_get_device;
+ mtd->_put_device = gluebi_put_device;
/*
* In case of dynamic a volume, MTD device size is just volume size. In
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 0686b93f1857..f84dd2dc82b6 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -458,7 +458,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
if (sg_dma_len(&ctl->sg) % 4)
sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
- ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
+ ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
&ctl->sg, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!ctl->adesc)
@@ -570,7 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
sg_dma_len(sg) = DMA_BUFFER_SIZE;
- ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
+ ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 26b3c23b0b6f..758148379b0e 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -193,7 +193,7 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
erase->state = MTD_ERASE_DONE;
} else {
erase->state = MTD_ERASE_FAILED;
- erase->fail_addr = 0xffffffff;
+ erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
}
mtd_erase_callback(erase);
return rc;
@@ -263,10 +263,10 @@ static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
part->mtd.owner = THIS_MODULE;
part->mtd.priv = efx_mtd;
part->mtd.name = part->name;
- part->mtd.erase = efx_mtd_erase;
- part->mtd.read = efx_mtd->ops->read;
- part->mtd.write = efx_mtd->ops->write;
- part->mtd.sync = efx_mtd_sync;
+ part->mtd._erase = efx_mtd_erase;
+ part->mtd._read = efx_mtd->ops->read;
+ part->mtd._write = efx_mtd->ops->write;
+ part->mtd._sync = efx_mtd_sync;
if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 423eb26386c8..d70ede7a7f96 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -290,8 +290,8 @@ config FARSYNC
Frame Relay or X.25/LAPB.
If you want the module to be automatically loaded when the interface
- is referenced then you should add "alias hdlcX farsync" to
- /etc/modprobe.conf for each interface, where X is 0, 1, 2, ..., or
+ is referenced then you should add "alias hdlcX farsync" to a file
+ in /etc/modprobe.d/ for each interface, where X is 0, 1, 2, ..., or
simply use "alias hdlc* farsync" to indicate all of them.
To compile this driver as a module, choose M here: the
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 060fd22a1103..0f150f271c2a 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -277,40 +277,6 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
return 0;
}
-/**
- * acpi_dev_run_wake - Enable/disable wake-up for given device.
- * @phys_dev: Device to enable/disable the platform to wake-up the system for.
- * @enable: Whether enable or disable the wake-up functionality.
- *
- * Find the ACPI device object corresponding to @pci_dev and try to
- * enable/disable the GPE associated with it.
- */
-static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
-{
- struct acpi_device *dev;
- acpi_handle handle;
-
- if (!device_run_wake(phys_dev))
- return -EINVAL;
-
- handle = DEVICE_ACPI_HANDLE(phys_dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
- dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
- __func__);
- return -ENODEV;
- }
-
- if (enable) {
- acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
- acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
- } else {
- acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
- acpi_disable_wakeup_device_power(dev);
- }
-
- return 0;
-}
-
static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
{
while (bus->parent) {
@@ -318,14 +284,14 @@ static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
if (bridge->pme_interrupt)
return;
- if (!acpi_dev_run_wake(&bridge->dev, enable))
+ if (!acpi_pm_device_run_wake(&bridge->dev, enable))
return;
bus = bus->parent;
}
/* We have reached the root bus. */
if (bus->bridge)
- acpi_dev_run_wake(bus->bridge, enable);
+ acpi_pm_device_run_wake(bus->bridge, enable);
}
static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
@@ -333,7 +299,7 @@ static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
if (dev->pme_interrupt)
return 0;
- if (!acpi_dev_run_wake(&dev->dev, enable))
+ if (!acpi_pm_device_run_wake(&dev->dev, enable))
return 0;
acpi_pci_propagate_run_wake(dev->bus, enable);
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 1dd68f502634..9694c1e783a5 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -16,13 +16,13 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/gpio.h>
#include <pcmcia/ss.h>
#include <mach/hardware.h>
#include <asm/io.h>
#include <asm/sizes.h>
-#include <asm/gpio.h>
#include <mach/board.h>
#include <mach/at91rm9200_mc.h>
@@ -70,7 +70,7 @@ static irqreturn_t at91_cf_irq(int irq, void *_cf)
{
struct at91_cf_socket *cf = _cf;
- if (irq == cf->board->det_pin) {
+ if (irq == gpio_to_irq(cf->board->det_pin)) {
unsigned present = at91_cf_present(cf);
/* kick pccard as needed */
@@ -96,8 +96,8 @@ static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp)
/* NOTE: CF is always 3VCARD */
if (at91_cf_present(cf)) {
- int rdy = cf->board->irq_pin; /* RDY/nIRQ */
- int vcc = cf->board->vcc_pin;
+ int rdy = gpio_is_valid(cf->board->irq_pin); /* RDY/nIRQ */
+ int vcc = gpio_is_valid(cf->board->vcc_pin);
*sp = SS_DETECT | SS_3VCARD;
if (!rdy || gpio_get_value(rdy))
@@ -118,7 +118,7 @@ at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
cf = container_of(sock, struct at91_cf_socket, socket);
/* switch Vcc if needed and possible */
- if (cf->board->vcc_pin) {
+ if (gpio_is_valid(cf->board->vcc_pin)) {
switch (s->Vcc) {
case 0:
gpio_set_value(cf->board->vcc_pin, 0);
@@ -222,7 +222,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
struct resource *io;
int status;
- if (!board || !board->det_pin || !board->rst_pin)
+ if (!board || !gpio_is_valid(board->det_pin) || !gpio_is_valid(board->rst_pin))
return -ENODEV;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -242,7 +242,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
status = gpio_request(board->det_pin, "cf_det");
if (status < 0)
goto fail0;
- status = request_irq(board->det_pin, at91_cf_irq, 0, driver_name, cf);
+ status = request_irq(gpio_to_irq(board->det_pin), at91_cf_irq, 0, driver_name, cf);
if (status < 0)
goto fail00;
device_init_wakeup(&pdev->dev, 1);
@@ -251,7 +251,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
if (status < 0)
goto fail0a;
- if (board->vcc_pin) {
+ if (gpio_is_valid(board->vcc_pin)) {
status = gpio_request(board->vcc_pin, "cf_vcc");
if (status < 0)
goto fail0b;
@@ -263,15 +263,15 @@ static int __init at91_cf_probe(struct platform_device *pdev)
* unless we report that we handle everything (sigh).
* (Note: DK board doesn't wire the IRQ pin...)
*/
- if (board->irq_pin) {
+ if (gpio_is_valid(board->irq_pin)) {
status = gpio_request(board->irq_pin, "cf_irq");
if (status < 0)
goto fail0c;
- status = request_irq(board->irq_pin, at91_cf_irq,
+ status = request_irq(gpio_to_irq(board->irq_pin), at91_cf_irq,
IRQF_SHARED, driver_name, cf);
if (status < 0)
goto fail0d;
- cf->socket.pci_irq = board->irq_pin;
+ cf->socket.pci_irq = gpio_to_irq(board->irq_pin);
} else
cf->socket.pci_irq = nr_irqs + 1;
@@ -290,7 +290,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
}
pr_info("%s: irqs det #%d, io #%d\n", driver_name,
- board->det_pin, board->irq_pin);
+ gpio_to_irq(board->det_pin), gpio_to_irq(board->irq_pin));
cf->socket.owner = THIS_MODULE;
cf->socket.dev.parent = &pdev->dev;
@@ -312,19 +312,19 @@ fail2:
fail1:
if (cf->socket.io_offset)
iounmap((void __iomem *) cf->socket.io_offset);
- if (board->irq_pin) {
- free_irq(board->irq_pin, cf);
+ if (gpio_is_valid(board->irq_pin)) {
+ free_irq(gpio_to_irq(board->irq_pin), cf);
fail0d:
gpio_free(board->irq_pin);
}
fail0c:
- if (board->vcc_pin)
+ if (gpio_is_valid(board->vcc_pin))
gpio_free(board->vcc_pin);
fail0b:
gpio_free(board->rst_pin);
fail0a:
device_init_wakeup(&pdev->dev, 0);
- free_irq(board->det_pin, cf);
+ free_irq(gpio_to_irq(board->det_pin), cf);
fail00:
gpio_free(board->det_pin);
fail0:
@@ -341,15 +341,15 @@ static int __exit at91_cf_remove(struct platform_device *pdev)
pcmcia_unregister_socket(&cf->socket);
release_mem_region(io->start, resource_size(io));
iounmap((void __iomem *) cf->socket.io_offset);
- if (board->irq_pin) {
- free_irq(board->irq_pin, cf);
+ if (gpio_is_valid(board->irq_pin)) {
+ free_irq(gpio_to_irq(board->irq_pin), cf);
gpio_free(board->irq_pin);
}
- if (board->vcc_pin)
+ if (gpio_is_valid(board->vcc_pin))
gpio_free(board->vcc_pin);
gpio_free(board->rst_pin);
device_init_wakeup(&pdev->dev, 0);
- free_irq(board->det_pin, cf);
+ free_irq(gpio_to_irq(board->det_pin), cf);
gpio_free(board->det_pin);
kfree(cf);
return 0;
@@ -363,9 +363,9 @@ static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
struct at91_cf_data *board = cf->board;
if (device_may_wakeup(&pdev->dev)) {
- enable_irq_wake(board->det_pin);
- if (board->irq_pin)
- enable_irq_wake(board->irq_pin);
+ enable_irq_wake(gpio_to_irq(board->det_pin));
+ if (gpio_is_valid(board->irq_pin))
+ enable_irq_wake(gpio_to_irq(board->irq_pin));
}
return 0;
}
@@ -376,9 +376,9 @@ static int at91_cf_resume(struct platform_device *pdev)
struct at91_cf_data *board = cf->board;
if (device_may_wakeup(&pdev->dev)) {
- disable_irq_wake(board->det_pin);
- if (board->irq_pin)
- disable_irq_wake(board->irq_pin);
+ disable_irq_wake(gpio_to_irq(board->det_pin));
+ if (gpio_is_valid(board->irq_pin))
+ disable_irq_wake(gpio_to_irq(board->irq_pin));
}
return 0;
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
index 693577e0fefc..c2e997a570bf 100644
--- a/drivers/pcmcia/bcm63xx_pcmcia.c
+++ b/drivers/pcmcia/bcm63xx_pcmcia.c
@@ -475,7 +475,7 @@ static void __devexit bcm63xx_cb_exit(struct pci_dev *dev)
bcm63xx_cb_dev = NULL;
}
-static struct pci_device_id bcm63xx_cb_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(bcm63xx_cb_table) = {
{
.vendor = PCI_VENDOR_ID_BROADCOM,
.device = BCM6348_CPU_ID,
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index 49221395101e..ac1a2232eab9 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -310,18 +310,7 @@ static struct platform_driver bfin_cf_driver = {
.remove = __devexit_p(bfin_cf_remove),
};
-static int __init bfin_cf_init(void)
-{
- return platform_driver_register(&bfin_cf_driver);
-}
-
-static void __exit bfin_cf_exit(void)
-{
- platform_driver_unregister(&bfin_cf_driver);
-}
-
-module_init(bfin_cf_init);
-module_exit(bfin_cf_exit);
+module_platform_driver(bfin_cf_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("BFIN CF/PCMCIA Driver");
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 5b7c22784aff..a484b1fb3382 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -172,12 +172,12 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
if ((sock->board_type == BOARD_TYPE_DB1200) ||
(sock->board_type == BOARD_TYPE_DB1300)) {
ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq,
- IRQF_DISABLED, "pcmcia_insert", sock);
+ 0, "pcmcia_insert", sock);
if (ret)
goto out1;
ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq,
- IRQF_DISABLED, "pcmcia_eject", sock);
+ 0, "pcmcia_eject", sock);
if (ret) {
free_irq(sock->insert_irq, sock);
goto out1;
@@ -580,18 +580,7 @@ static struct platform_driver db1x_pcmcia_socket_driver = {
.remove = __devexit_p(db1x_pcmcia_socket_remove),
};
-int __init db1x_pcmcia_socket_load(void)
-{
- return platform_driver_register(&db1x_pcmcia_socket_driver);
-}
-
-void __exit db1x_pcmcia_socket_unload(void)
-{
- platform_driver_unregister(&db1x_pcmcia_socket_driver);
-}
-
-module_init(db1x_pcmcia_socket_load);
-module_exit(db1x_pcmcia_socket_unload);
+module_platform_driver(db1x_pcmcia_socket_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCMCIA Socket Services for Alchemy Db/Pb1x00 boards");
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 06ad3e5e7d3d..7647d232e9e2 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -365,17 +365,7 @@ static struct platform_driver electra_cf_driver = {
.remove = electra_cf_remove,
};
-static int __init electra_cf_init(void)
-{
- return platform_driver_register(&electra_cf_driver);
-}
-module_init(electra_cf_init);
-
-static void __exit electra_cf_exit(void)
-{
- platform_driver_unregister(&electra_cf_driver);
-}
-module_exit(electra_cf_exit);
+module_platform_driver(electra_cf_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 0b66bfc0e148..4e8831bdb6ef 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -25,14 +25,9 @@
MODULE_LICENSE("GPL");
/* PCI core routines */
-static struct pci_device_id i82092aa_pci_ids[] = {
- {
- .vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_82092AA_0,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- {}
+static DEFINE_PCI_DEVICE_TABLE(i82092aa_pci_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82092AA_0) },
+ { }
};
MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids);
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index a317defd616d..a3a851e49321 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1303,15 +1303,4 @@ static struct platform_driver m8xx_pcmcia_driver = {
.remove = m8xx_remove,
};
-static int __init m8xx_init(void)
-{
- return platform_driver_register(&m8xx_pcmcia_driver);
-}
-
-static void __exit m8xx_exit(void)
-{
- platform_driver_unregister(&m8xx_pcmcia_driver);
-}
-
-module_init(m8xx_init);
-module_exit(m8xx_exit);
+module_platform_driver(m8xx_pcmcia_driver);
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 0f8b70b27762..253e3867dec7 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -762,13 +762,8 @@ static void __devexit pd6729_pci_remove(struct pci_dev *dev)
kfree(socket);
}
-static struct pci_device_id pd6729_pci_ids[] = {
- {
- .vendor = PCI_VENDOR_ID_CIRRUS,
- .device = PCI_DEVICE_ID_CIRRUS_6729,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
+static DEFINE_PCI_DEVICE_TABLE(pd6729_pci_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6729) },
{ }
};
MODULE_DEVICE_TABLE(pci, pd6729_pci_ids);
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
index adfae4987a42..cb0c37ec7f24 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/drivers/pcmcia/pxa2xx_viper.c
@@ -177,18 +177,7 @@ static struct platform_driver viper_pcmcia_driver = {
.id_table = viper_pcmcia_id_table,
};
-static int __init viper_pcmcia_init(void)
-{
- return platform_driver_register(&viper_pcmcia_driver);
-}
-
-static void __exit viper_pcmcia_exit(void)
-{
- return platform_driver_unregister(&viper_pcmcia_driver);
-}
-
-module_init(viper_pcmcia_init);
-module_exit(viper_pcmcia_exit);
+module_platform_driver(viper_pcmcia_driver);
MODULE_DEVICE_TABLE(platform, viper_pcmcia_id_table);
MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c
index c6d36b3a6ce8..cd0a315d922b 100644
--- a/drivers/pcmcia/vrc4173_cardu.c
+++ b/drivers/pcmcia/vrc4173_cardu.c
@@ -563,11 +563,8 @@ static int __devinit vrc4173_cardu_setup(char *options)
__setup("vrc4173_cardu=", vrc4173_cardu_setup);
-static struct pci_device_id vrc4173_cardu_id_table[] __devinitdata = {
- { .vendor = PCI_VENDOR_ID_NEC,
- .device = PCI_DEVICE_ID_NEC_NAPCCARD,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID, },
+static DEFINE_PCI_DEVICE_TABLE(vrc4173_cardu_id_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_NAPCCARD) },
{0, }
};
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c
index 8f6698074f8e..fd5fbd10aad0 100644
--- a/drivers/pcmcia/xxs1500_ss.c
+++ b/drivers/pcmcia/xxs1500_ss.c
@@ -320,18 +320,7 @@ static struct platform_driver xxs1500_pcmcia_socket_driver = {
.remove = __devexit_p(xxs1500_pcmcia_remove),
};
-int __init xxs1500_pcmcia_socket_load(void)
-{
- return platform_driver_register(&xxs1500_pcmcia_socket_driver);
-}
-
-void __exit xxs1500_pcmcia_socket_unload(void)
-{
- platform_driver_unregister(&xxs1500_pcmcia_socket_driver);
-}
-
-module_init(xxs1500_pcmcia_socket_load);
-module_exit(xxs1500_pcmcia_socket_unload);
+module_platform_driver(xxs1500_pcmcia_socket_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCMCIA Socket Services for MyCable XXS1500 systems");
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 849c0c11d2af..d07f9ac8c41d 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1352,7 +1352,7 @@ static const struct dev_pm_ops yenta_pm_ops = {
.driver_data = CARDBUS_TYPE_##type, \
}
-static struct pci_device_id yenta_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(yenta_table) = {
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1031, TI),
/*
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 88a98cff5a44..f7ba316e0ed6 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -609,25 +609,16 @@ static bool mcp_exceeded(struct ips_driver *ips)
bool ret = false;
u32 temp_limit;
u32 avg_power;
- const char *msg = "MCP limit exceeded: ";
spin_lock_irqsave(&ips->turbo_status_lock, flags);
temp_limit = ips->mcp_temp_limit * 100;
- if (ips->mcp_avg_temp > temp_limit) {
- dev_info(&ips->dev->dev,
- "%sAvg temp %u, limit %u\n", msg, ips->mcp_avg_temp,
- temp_limit);
+ if (ips->mcp_avg_temp > temp_limit)
ret = true;
- }
avg_power = ips->cpu_avg_power + ips->mch_avg_power;
- if (avg_power > ips->mcp_power_limit) {
- dev_info(&ips->dev->dev,
- "%sAvg power %u, limit %u\n", msg, avg_power,
- ips->mcp_power_limit);
+ if (avg_power > ips->mcp_power_limit)
ret = true;
- }
spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index b00c17612a89..d21e8f59c84e 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -321,9 +321,14 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
{
struct acpi_device *acpi = to_acpi_device(dev);
struct pnp_dev *pnp = _pnp;
+ struct device *physical_device;
+
+ physical_device = acpi_get_physical_device(acpi->handle);
+ if (physical_device)
+ put_device(physical_device);
/* true means it matched */
- return !acpi_get_physical_device(acpi->handle)
+ return !physical_device
&& compare_pnp_id(pnp->id, acpi_device_hid(acpi));
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 459f66437fe9..99dc29f2f2f2 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -249,7 +249,7 @@ config CHARGER_TWL4030
Say Y here to enable support for TWL4030 Battery Charge Interface.
config CHARGER_LP8727
- tristate "National Semiconductor LP8727 charger driver"
+ tristate "TI/National Semiconductor LP8727 charger driver"
depends on I2C
help
Say Y here to enable support for LP8727 Charger Driver.
@@ -288,4 +288,23 @@ config CHARGER_MAX8998
Say Y to enable support for the battery charger control sysfs and
platform data of MAX8998/LP3974 PMICs.
+config CHARGER_SMB347
+ tristate "Summit Microelectronics SMB347 Battery Charger"
+ depends on I2C
+ help
+ Say Y to include support for Summit Microelectronics SMB347
+ Battery Charger.
+
+config AB8500_BM
+ bool "AB8500 Battery Management Driver"
+ depends on AB8500_CORE && AB8500_GPADC
+ help
+ Say Y to include support for AB8500 battery management.
+
+config AB8500_BATTERY_THERM_ON_BATCTRL
+ bool "Thermistor connected on BATCTRL ADC"
+ depends on AB8500_BM
+ help
+ Say Y to enable battery temperature measurements using a
+ thermistor connected to the BATCTRL ADC.
endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index c590fa533406..b6b243416c0e 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
+obj-$(CONFIG_AB8500_BM) += ab8500_charger.o ab8500_btemp.o ab8500_fg.o abx500_chargalg.o
obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
@@ -42,3 +43,4 @@ obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
+obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
new file mode 100644
index 000000000000..d8bb99394ac0
--- /dev/null
+++ b/drivers/power/ab8500_btemp.c
@@ -0,0 +1,1124 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Battery temperature driver for AB8500
+ *
+ * License Terms: GNU General Public License v2
+ * Author:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ * Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/jiffies.h>
+
+#define VTVOUT_V 1800
+
+#define BTEMP_THERMAL_LOW_LIMIT -10
+#define BTEMP_THERMAL_MED_LIMIT 0
+#define BTEMP_THERMAL_HIGH_LIMIT_52 52
+#define BTEMP_THERMAL_HIGH_LIMIT_57 57
+#define BTEMP_THERMAL_HIGH_LIMIT_62 62
+
+#define BTEMP_BATCTRL_CURR_SRC_7UA 7
+#define BTEMP_BATCTRL_CURR_SRC_20UA 20
+
+#define to_ab8500_btemp_device_info(x) container_of((x), \
+ struct ab8500_btemp, btemp_psy);
+
+/**
+ * struct ab8500_btemp_interrupts - ab8500 interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab8500_btemp_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab8500_btemp_events {
+ bool batt_rem;
+ bool btemp_high;
+ bool btemp_medhigh;
+ bool btemp_lowmed;
+ bool btemp_low;
+ bool ac_conn;
+ bool usb_conn;
+};
+
+struct ab8500_btemp_ranges {
+ int btemp_high_limit;
+ int btemp_med_limit;
+ int btemp_low_limit;
+};
+
+/**
+ * struct ab8500_btemp - ab8500 BTEMP device information
+ * @dev: Pointer to the structure device
+ * @node: List of AB8500 BTEMPs, hence prepared for reentrance
+ * @curr_source: What current source we use, in uA
+ * @bat_temp: Battery temperature in degrees Celsius
+ * @prev_bat_temp: Last dispatched battery temperature
+ * @parent: Pointer to the struct ab8500
+ * @gpadc: Pointer to the struct gpadc
+ * @fg: Pointer to the struct fg
+ * @pdata: Pointer to the abx500_btemp platform data
+ * @bat: Pointer to the abx500_bm platform data
+ * @btemp_psy: Structure for BTEMP specific battery properties
+ * @events: Structure for information about events triggered
+ * @btemp_ranges: Battery temperature range structure
+ * @btemp_wq: Work queue for measuring the temperature periodically
+ * @btemp_periodic_work: Work for measuring the temperature periodically
+ */
+struct ab8500_btemp {
+ struct device *dev;
+ struct list_head node;
+ int curr_source;
+ int bat_temp;
+ int prev_bat_temp;
+ struct ab8500 *parent;
+ struct ab8500_gpadc *gpadc;
+ struct ab8500_fg *fg;
+ struct abx500_btemp_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct power_supply btemp_psy;
+ struct ab8500_btemp_events events;
+ struct ab8500_btemp_ranges btemp_ranges;
+ struct workqueue_struct *btemp_wq;
+ struct delayed_work btemp_periodic_work;
+};
+
+/* BTEMP power supply properties */
+static enum power_supply_property ab8500_btemp_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_TEMP,
+};
+
+static LIST_HEAD(ab8500_btemp_list);
+
+/**
+ * ab8500_btemp_get() - returns a reference to the primary AB8500 BTEMP
+ * (i.e. the first BTEMP in the instance list)
+ */
+struct ab8500_btemp *ab8500_btemp_get(void)
+{
+ struct ab8500_btemp *btemp;
+ btemp = list_first_entry(&ab8500_btemp_list, struct ab8500_btemp, node);
+
+ return btemp;
+}
+
+/**
+ * ab8500_btemp_batctrl_volt_to_res() - convert batctrl voltage to resistance
+ * @di: pointer to the ab8500_btemp structure
+ * @v_batctrl: measured batctrl voltage
+ * @inst_curr: measured instant current
+ *
+ * This function returns the battery resistance that is
+ * derived from the BATCTRL voltage.
+ * Returns value in Ohms.
+ */
+static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
+ int v_batctrl, int inst_curr)
+{
+ int rbs;
+
+ if (is_ab8500_1p1_or_earlier(di->parent)) {
+ /*
+ * For ABB cut1.0 and 1.1 BAT_CTRL is internally
+ * connected to 1.8V through a 450k resistor
+ */
+ return (450000 * (v_batctrl)) / (1800 - v_batctrl);
+ }
+
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL) {
+ /*
+ * If the battery has internal NTC, we use the current
+ * source to calculate the resistance, 7uA or 20uA
+ */
+ rbs = (v_batctrl * 1000
+ - di->bat->gnd_lift_resistance * inst_curr)
+ / di->curr_source;
+ } else {
+ /*
+ * BAT_CTRL is internally
+ * connected to 1.8V through an 80k resistor
+ */
+ rbs = (80000 * (v_batctrl)) / (1800 - v_batctrl);
+ }
+
+ return rbs;
+}
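As a worked instance of the 450 kOhm pull-up case above (illustrative numbers only, not values from the driver): a hypothetical BAT_CTRL reading of 300 mV against the 1.8 V rail gives 450000 * 300 / (1800 - 300) = 90000 Ohm.

    #include <stdio.h>

    int main(void)
    {
            int v_batctrl = 300;    /* hypothetical measurement, in mV */
            int rbs = (450000 * v_batctrl) / (1800 - v_batctrl);

            printf("%d mV -> %d Ohm\n", v_batctrl, rbs);    /* prints: 300 mV -> 90000 Ohm */
            return 0;
    }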
+
+/**
+ * ab8500_btemp_read_batctrl_voltage() - measure batctrl voltage
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * This function returns the voltage on BATCTRL. Returns value in mV.
+ */
+static int ab8500_btemp_read_batctrl_voltage(struct ab8500_btemp *di)
+{
+ int vbtemp;
+ static int prev;
+
+ vbtemp = ab8500_gpadc_convert(di->gpadc, BAT_CTRL);
+ if (vbtemp < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed, using previous value",
+ __func__);
+ return prev;
+ }
+ prev = vbtemp;
+ return vbtemp;
+}
+
+/**
+ * ab8500_btemp_curr_source_enable() - enable/disable batctrl current source
+ * @di: pointer to the ab8500_btemp structure
+ * @enable: enable or disable the current source
+ *
+ * Enable or disable the current sources for the BatCtrl AD channel
+ */
+static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
+ bool enable)
+{
+ int curr;
+ int ret = 0;
+
+ /*
+ * BATCTRL current sources are included on AB8500 cut2.0
+ * and future versions
+ */
+ if (is_ab8500_1p1_or_earlier(di->parent))
+ return 0;
+
+ /* Only do this for batteries with internal NTC */
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
+ if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
+ curr = BAT_CTRL_7U_ENA;
+ else
+ curr = BAT_CTRL_20U_ENA;
+
+ dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ FORCE_BAT_CTRL_CMP_HIGH, FORCE_BAT_CTRL_CMP_HIGH);
+ if (ret) {
+ dev_err(di->dev, "%s failed setting cmp_force\n",
+ __func__);
+ return ret;
+ }
+
+ /*
+ * We have to wait one 32kHz cycle before enabling
+ * the current source, since ForceBatCtrlCmpHigh needs
+ * to be written in a separate cycle
+ */
+ udelay(32);
+
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ FORCE_BAT_CTRL_CMP_HIGH | curr);
+ if (ret) {
+ dev_err(di->dev, "%s failed enabling current source\n",
+ __func__);
+ goto disable_curr_source;
+ }
+ } else if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
+ dev_dbg(di->dev, "Disable BATCTRL curr source\n");
+
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
+ ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling current source\n",
+ __func__);
+ goto disable_curr_source;
+ }
+
+ /* Enable Pull-Up and comparator */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA,
+ BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA);
+ if (ret) {
+ dev_err(di->dev, "%s failed enabling PU and comp\n",
+ __func__);
+ goto enable_pu_comp;
+ }
+
+ /*
+ * We have to wait one 32kHz cycle before disabling
+ * ForceBatCtrlCmpHigh since this needs to be written
+ * in a separate cycle
+ */
+ udelay(32);
+
+ /* Disable 'force comparator' */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ FORCE_BAT_CTRL_CMP_HIGH, ~FORCE_BAT_CTRL_CMP_HIGH);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling force comp\n",
+ __func__);
+ goto disable_force_comp;
+ }
+ }
+ return ret;
+
+ /*
+ * We have to try unsetting FORCE_BAT_CTRL_CMP_HIGH one more time
+ * if we got an error above
+ */
+disable_curr_source:
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
+ ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling current source\n",
+ __func__);
+ return ret;
+ }
+enable_pu_comp:
+ /* Enable Pull-Up and comparator */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA,
+ BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA);
+ if (ret) {
+ dev_err(di->dev, "%s failed enabling PU and comp\n",
+ __func__);
+ return ret;
+ }
+
+disable_force_comp:
+ /*
+ * We have to wait one 32kHz cycle before disabling
+ * ForceBatCtrlCmpHigh since this needs to be written
+ * in a separate cycle
+ */
+ udelay(32);
+
+ /* Disable 'force comparator' */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ FORCE_BAT_CTRL_CMP_HIGH, ~FORCE_BAT_CTRL_CMP_HIGH);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling force comp\n",
+ __func__);
+ return ret;
+ }
+
+ return ret;
+}
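For reference, one period of the 32.768 kHz clock mentioned in the comments above is 1/32768 s, roughly 30.5 us, so the udelay(32) calls leave one full clock cycle (plus a small margin) between the two register writes.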
+
+/**
+ * ab8500_btemp_get_batctrl_res() - get battery resistance
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * This function returns the battery pack identification resistance.
+ * Returns value in Ohms.
+ */
+static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
+{
+ int ret;
+ int batctrl = 0;
+ int res;
+ int inst_curr;
+ int i;
+
+ /*
+ * BATCTRL current sources are included on AB8500 cut2.0
+ * and future versions
+ */
+ ret = ab8500_btemp_curr_source_enable(di, true);
+ if (ret) {
+ dev_err(di->dev, "%s curr source enabled failed\n", __func__);
+ return ret;
+ }
+
+ if (!di->fg)
+ di->fg = ab8500_fg_get();
+ if (!di->fg) {
+ dev_err(di->dev, "No fg found\n");
+ return -EINVAL;
+ }
+
+ ret = ab8500_fg_inst_curr_start(di->fg);
+
+ if (ret) {
+ dev_err(di->dev, "Failed to start current measurement\n");
+ return ret;
+ }
+
+ /*
+ * Since there is no interrupt when current measurement is done,
+ * loop for over 250ms (250ms is one sample conversion time
+ * with 32.768 kHz RTC clock). Note that a stop time must be set
+ * since the ab8500_btemp_read_batctrl_voltage call can block and
+ * take an unknown amount of time to complete.
+ */
+ i = 0;
+
+ do {
+ batctrl += ab8500_btemp_read_batctrl_voltage(di);
+ i++;
+ msleep(20);
+ } while (!ab8500_fg_inst_curr_done(di->fg));
+ batctrl /= i;
+
+ ret = ab8500_fg_inst_curr_finalize(di->fg, &inst_curr);
+ if (ret) {
+ dev_err(di->dev, "Failed to finalize current measurement\n");
+ return ret;
+ }
+
+ res = ab8500_btemp_batctrl_volt_to_res(di, batctrl, inst_curr);
+
+ ret = ab8500_btemp_curr_source_enable(di, false);
+ if (ret) {
+ dev_err(di->dev, "%s curr source disable failed\n", __func__);
+ return ret;
+ }
+
+ dev_dbg(di->dev, "%s batctrl: %d res: %d inst_curr: %d samples: %d\n",
+ __func__, batctrl, res, inst_curr, i);
+
+ return res;
+}
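A standalone sketch of the sampling loop above, with hypothetical stand-ins (read_batctrl_mv(), measurement_done()) for the GPADC read and the fuel-gauge completion poll; the 20 ms sleep is replaced by a fixed iteration count that mimics the roughly 250 ms conversion window described in the comment.

    #include <stdio.h>

    static int read_batctrl_mv(void) { return 300; }        /* hypothetical ADC read */

    static int conversions;
    static int measurement_done(void) { return ++conversions >= 12; }      /* ~12 x 20 ms */

    int main(void)
    {
            int sum = 0, samples = 0;

            do {
                    sum += read_batctrl_mv();
                    samples++;
            } while (!measurement_done());

            printf("average = %d mV over %d samples\n", sum / samples, samples);
            return 0;
    }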
+
+/**
+ * ab8500_btemp_res_to_temp() - resistance to temperature
+ * @di: pointer to the ab8500_btemp structure
+ * @tbl: pointer to the resistance to temperature table
+ * @tbl_size: size of the resistance to temperature table
+ * @res: resistance to calculate the temperature from
+ *
+ * This function returns the battery temperature in degrees Celsius
+ * based on the NTC resistance.
+ */
+static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
+ const struct abx500_res_to_temp *tbl, int tbl_size, int res)
+{
+ int i, temp;
+ /*
+ * Calculate the formula for the straight line
+ * Simple interpolation if we are within
+ * the resistance table limits, extrapolate
+ * if resistance is outside the limits.
+ */
+ if (res > tbl[0].resist)
+ i = 0;
+ else if (res <= tbl[tbl_size - 1].resist)
+ i = tbl_size - 2;
+ else {
+ i = 0;
+ while (!(res <= tbl[i].resist &&
+ res > tbl[i + 1].resist))
+ i++;
+ }
+
+ temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
+ (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
+ return temp;
+}
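To make the interpolation concrete, here is a tiny standalone example with a hypothetical two-point table slice (10 kOhm at 20 C, 8 kOhm at 30 C); a measured 9 kOhm falls halfway between the two points and resolves to 25 C.

    #include <stdio.h>

    struct res_to_temp { int temp; int resist; };   /* hypothetical table entry */

    int main(void)
    {
            struct res_to_temp lo = { 20, 10000 };  /* 10 kOhm at 20 C */
            struct res_to_temp hi = { 30, 8000 };   /* 8 kOhm at 30 C */
            int res = 9000;                         /* measured resistance, Ohm */

            int temp = lo.temp + (hi.temp - lo.temp) * (res - lo.resist)
                                            / (hi.resist - lo.resist);

            printf("%d Ohm -> %d C\n", res, temp);  /* prints: 9000 Ohm -> 25 C */
            return 0;
    }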
+
+/**
+ * ab8500_btemp_measure_temp() - measure battery temperature
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * Returns battery temperature (on success) else the previous temperature
+ */
+static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
+{
+ int temp;
+ static int prev;
+ int rbat, rntc, vntc;
+ u8 id;
+
+ id = di->bat->batt_id;
+
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+ id != BATTERY_UNKNOWN) {
+
+ rbat = ab8500_btemp_get_batctrl_res(di);
+ if (rbat < 0) {
+ dev_err(di->dev, "%s get batctrl res failed\n",
+ __func__);
+ /*
+ * Return out-of-range temperature so that
+ * charging is stopped
+ */
+ return BTEMP_THERMAL_LOW_LIMIT;
+ }
+
+ temp = ab8500_btemp_res_to_temp(di,
+ di->bat->bat_type[id].r_to_t_tbl,
+ di->bat->bat_type[id].n_temp_tbl_elements, rbat);
+ } else {
+ vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL);
+ if (vntc < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed,"
+ " using previous value\n", __func__);
+ return prev;
+ }
+ /*
+ * The PCB NTC is sourced from VTVOUT via a 230kOhm
+ * resistor.
+ */
+ rntc = 230000 * vntc / (VTVOUT_V - vntc);
+
+ temp = ab8500_btemp_res_to_temp(di,
+ di->bat->bat_type[id].r_to_t_tbl,
+ di->bat->bat_type[id].n_temp_tbl_elements, rntc);
+ prev = temp;
+ }
+ dev_dbg(di->dev, "Battery temperature is %d\n", temp);
+ return temp;
+}
+
+/**
+ * ab8500_btemp_id() - Identify the connected battery
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * This function will try to identify the battery by reading the ID
+ * resistor. Some brands use a combined ID resistor with an NTC resistor so
+ * that the battery can both be identified and have its temperature read.
+ */
+static int ab8500_btemp_id(struct ab8500_btemp *di)
+{
+ int res;
+ u8 i;
+
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
+ di->bat->batt_id = BATTERY_UNKNOWN;
+
+ res = ab8500_btemp_get_batctrl_res(di);
+ if (res < 0) {
+ dev_err(di->dev, "%s get batctrl res failed\n", __func__);
+ return -ENXIO;
+ }
+
+ /* BATTERY_UNKNOWN is defined at position 0, skip it! */
+ for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) {
+ if ((res <= di->bat->bat_type[i].resis_high) &&
+ (res >= di->bat->bat_type[i].resis_low)) {
+ dev_dbg(di->dev, "Battery detected on %s"
+ " low %d < res %d < high: %d"
+ " index: %d\n",
+ di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL ?
+ "BATCTRL" : "BATTEMP",
+ di->bat->bat_type[i].resis_low, res,
+ di->bat->bat_type[i].resis_high, i);
+
+ di->bat->batt_id = i;
+ break;
+ }
+ }
+
+ if (di->bat->batt_id == BATTERY_UNKNOWN) {
+ dev_warn(di->dev, "Battery identified as unknown"
+ ", resistance %d Ohm\n", res);
+ return -ENXIO;
+ }
+
+ /*
+ * We only have to change current source if the
+ * detected type is Type 1, else we use the 7uA source
+ */
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+ di->bat->batt_id == 1) {
+ dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
+ }
+
+ return di->bat->batt_id;
+}
+
+/**
+ * ab8500_btemp_periodic_work() - Measuring the temperature periodically
+ * @work: pointer to the work_struct structure
+ *
+ * Work function for measuring the temperature periodically
+ */
+static void ab8500_btemp_periodic_work(struct work_struct *work)
+{
+ int interval;
+ struct ab8500_btemp *di = container_of(work,
+ struct ab8500_btemp, btemp_periodic_work.work);
+
+ di->bat_temp = ab8500_btemp_measure_temp(di);
+
+ if (di->bat_temp != di->prev_bat_temp) {
+ di->prev_bat_temp = di->bat_temp;
+ power_supply_changed(&di->btemp_psy);
+ }
+
+ if (di->events.ac_conn || di->events.usb_conn)
+ interval = di->bat->temp_interval_chg;
+ else
+ interval = di->bat->temp_interval_nochg;
+
+ /* Schedule a new measurement */
+ queue_delayed_work(di->btemp_wq,
+ &di->btemp_periodic_work,
+ round_jiffies(interval * HZ));
+}
+
+/**
+ * ab8500_btemp_batctrlindb_handler() - battery removal detected
+ * @irq: interrupt number
+ * @_di: void pointer that holds the address of ab8500_btemp
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_batctrlindb_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+ dev_err(di->dev, "Battery removal detected!\n");
+
+ di->events.batt_rem = true;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_templow_handler() - battery temp lower than -10 degrees C
+ * @irq: interrupt number
+ * @_di: void pointer that holds the address of ab8500_btemp
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_templow_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+
+ if (is_ab8500_2p0_or_earlier(di->parent)) {
+ dev_dbg(di->dev, "Ignore false btemp low irq"
+ " for ABB cut 1.0, 1.1 and 2.0\n");
+ } else {
+ dev_crit(di->dev, "Battery temperature lower than -10deg c\n");
+
+ di->events.btemp_low = true;
+ di->events.btemp_high = false;
+ di->events.btemp_medhigh = false;
+ di->events.btemp_lowmed = false;
+ power_supply_changed(&di->btemp_psy);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_temphigh_handler() - battery temp higher than max temp
+ * @irq: interrupt number
+ * @_di: void pointer that holds the address of ab8500_btemp
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_temphigh_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+
+ dev_crit(di->dev, "Battery temperature is higher than MAX temp\n");
+
+ di->events.btemp_high = true;
+ di->events.btemp_medhigh = false;
+ di->events.btemp_lowmed = false;
+ di->events.btemp_low = false;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_lowmed_handler() - battery temp between low and medium
+ * @irq: interrupt number
+ * @_di: void pointer that holds the address of ab8500_btemp
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_lowmed_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+
+ dev_dbg(di->dev, "Battery temperature is between low and medium\n");
+
+ di->events.btemp_lowmed = true;
+ di->events.btemp_medhigh = false;
+ di->events.btemp_high = false;
+ di->events.btemp_low = false;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_medhigh_handler() - battery temp between medium and high
+ * @irq: interrupt number
+ * @_di: void pointer that holds the address of ab8500_btemp
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_medhigh_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+
+ dev_dbg(di->dev, "Battery temperature is between medium and high\n");
+
+ di->events.btemp_medhigh = true;
+ di->events.btemp_lowmed = false;
+ di->events.btemp_high = false;
+ di->events.btemp_low = false;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_periodic() - Periodic temperature measurements
+ * @di: pointer to the ab8500_btemp structure
+ * @enable: enable or disable periodic temperature measurements
+ *
+ * Starts or stops periodic temperature measurements. Periodic measurements
+ * should only be done when a charger is connected.
+ */
+static void ab8500_btemp_periodic(struct ab8500_btemp *di,
+ bool enable)
+{
+ dev_dbg(di->dev, "Enable periodic temperature measurements: %d\n",
+ enable);
+ /*
+ * Make sure a new measurement is done directly by cancelling
+ * any pending work
+ */
+ cancel_delayed_work_sync(&di->btemp_periodic_work);
+
+ if (enable)
+ queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0);
+}
+
+/**
+ * ab8500_btemp_get_temp() - get battery temperature
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * Returns battery temperature
+ */
+static int ab8500_btemp_get_temp(struct ab8500_btemp *di)
+{
+ int temp = 0;
+
+ /*
+ * The BTEMP events are not reliable on AB8500 cut2.0
+ * and prior versions
+ */
+ if (is_ab8500_2p0_or_earlier(di->parent)) {
+ temp = di->bat_temp * 10;
+ } else {
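+ /* Clamp the reported temperature to the limit indicated by the BTEMP event */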
+ if (di->events.btemp_low) {
+ if (di->bat_temp * 10 > di->btemp_ranges.btemp_low_limit)
+ temp = di->btemp_ranges.btemp_low_limit;
+ else
+ temp = di->bat_temp * 10;
+ } else if (di->events.btemp_high) {
+ if (di->bat_temp * 10 < di->btemp_ranges.btemp_high_limit)
+ temp = di->btemp_ranges.btemp_high_limit;
+ else
+ temp = di->bat_temp * 10;
+ } else if (di->events.btemp_lowmed) {
+ if (di->bat_temp * 10 > di->btemp_ranges.btemp_med_limit)
+ temp = di->btemp_ranges.btemp_med_limit;
+ else
+ temp = di->bat_temp * 10;
+ } else if (di->events.btemp_medhigh) {
+ if (di->bat_temp * 10 < di->btemp_ranges.btemp_med_limit)
+ temp = di->btemp_ranges.btemp_med_limit;
+ else
+ temp = di->bat_temp * 10;
+ } else
+ temp = di->bat_temp * 10;
+ }
+ return temp;
+}
+
+/**
+ * ab8500_btemp_get_batctrl_temp() - get the temperature
+ * @btemp: pointer to the btemp structure
+ *
+ * Returns the batctrl temperature in millidegrees
+ */
+int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
+{
+ return btemp->bat_temp * 1000;
+}
+
+/**
+ * ab8500_btemp_get_property() - get the btemp properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the btemp
+ * properties by reading the sysfs files.
+ * online: presence of the battery
+ * present: presence of the battery
+ * technology: battery technology
+ * temp: battery temperature
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_btemp_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_btemp *di;
+
+ di = to_ab8500_btemp_device_info(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (di->events.batt_rem)
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = di->bat->bat_type[di->bat->batt_id].name;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = ab8500_btemp_get_temp(di);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab8500_btemp *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab8500_btemp_device_info(psy);
+
+ /*
+ * For all psy where the name of this driver
+ * appears in any supplied_to
+ */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC disconnected */
+ if (!ret.intval && di->events.ac_conn) {
+ di->events.ac_conn = false;
+ }
+ /* AC connected */
+ else if (ret.intval && !di->events.ac_conn) {
+ di->events.ac_conn = true;
+ if (!di->events.usb_conn)
+ ab8500_btemp_periodic(di, true);
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB disconnected */
+ if (!ret.intval && di->events.usb_conn) {
+ di->events.usb_conn = false;
+ }
+ /* USB connected */
+ else if (ret.intval && !di->events.usb_conn) {
+ di->events.usb_conn = true;
+ if (!di->events.ac_conn)
+ ab8500_btemp_periodic(di, true);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab8500_btemp_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is assigned to the external_power_changed function pointer
+ * of the power_supply structure.
+ * This function gets executed when there is a change in the external power
+ * supply to the btemp.
+ */
+static void ab8500_btemp_external_power_changed(struct power_supply *psy)
+{
+ struct ab8500_btemp *di = to_ab8500_btemp_device_info(psy);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->btemp_psy, ab8500_btemp_get_ext_psy_data);
+}
+
+/* ab8500 btemp driver interrupts and their respective isr */
+static struct ab8500_btemp_interrupts ab8500_btemp_irq[] = {
+ {"BAT_CTRL_INDB", ab8500_btemp_batctrlindb_handler},
+ {"BTEMP_LOW", ab8500_btemp_templow_handler},
+ {"BTEMP_HIGH", ab8500_btemp_temphigh_handler},
+ {"BTEMP_LOW_MEDIUM", ab8500_btemp_lowmed_handler},
+ {"BTEMP_MEDIUM_HIGH", ab8500_btemp_medhigh_handler},
+};
+
+#if defined(CONFIG_PM)
+static int ab8500_btemp_resume(struct platform_device *pdev)
+{
+ struct ab8500_btemp *di = platform_get_drvdata(pdev);
+
+ ab8500_btemp_periodic(di, true);
+
+ return 0;
+}
+
+static int ab8500_btemp_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab8500_btemp *di = platform_get_drvdata(pdev);
+
+ ab8500_btemp_periodic(di, false);
+
+ return 0;
+}
+#else
+#define ab8500_btemp_suspend NULL
+#define ab8500_btemp_resume NULL
+#endif
+
+static int __devexit ab8500_btemp_remove(struct platform_device *pdev)
+{
+ struct ab8500_btemp *di = platform_get_drvdata(pdev);
+ int i, irq;
+
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ free_irq(irq, di);
+ }
+
+ /* Delete the work queue */
+ destroy_workqueue(di->btemp_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->btemp_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
+{
+ int irq, i, ret = 0;
+ u8 val;
+ struct abx500_bm_plat_data *plat_data;
+
+ struct ab8500_btemp *di =
+ kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ /* get btemp specific platform data */
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->btemp;
+ if (!di->pdata) {
+ dev_err(di->dev, "no btemp platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* get battery specific platform data */
+ di->bat = plat_data->battery;
+ if (!di->bat) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* BTEMP supply */
+ di->btemp_psy.name = "ab8500_btemp";
+ di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->btemp_psy.properties = ab8500_btemp_props;
+ di->btemp_psy.num_properties = ARRAY_SIZE(ab8500_btemp_props);
+ di->btemp_psy.get_property = ab8500_btemp_get_property;
+ di->btemp_psy.supplied_to = di->pdata->supplied_to;
+ di->btemp_psy.num_supplicants = di->pdata->num_supplicants;
+ di->btemp_psy.external_power_changed =
+ ab8500_btemp_external_power_changed;
+
+
+ /* Create a work queue for the btemp */
+ di->btemp_wq =
+ create_singlethread_workqueue("ab8500_btemp_wq");
+ if (di->btemp_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for measuring temperature periodically */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
+ ab8500_btemp_periodic_work);
+
+ /* Identify the battery */
+ if (ab8500_btemp_id(di) < 0)
+ dev_warn(di->dev, "failed to identify the battery\n");
+
+ /* Set BTEMP thermal limits. Low and Med are fixed */
+ di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT;
+ di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT;
+
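+ /*
+ * The BTEMP high threshold is configurable in HW, so read it
+ * back and map it to the corresponding temperature limit
+ */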
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_BTEMP_HIGH_TH, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ goto free_btemp_wq;
+ }
+ switch (val) {
+ case BTEMP_HIGH_TH_57_0:
+ case BTEMP_HIGH_TH_57_1:
+ di->btemp_ranges.btemp_high_limit =
+ BTEMP_THERMAL_HIGH_LIMIT_57;
+ break;
+ case BTEMP_HIGH_TH_52:
+ di->btemp_ranges.btemp_high_limit =
+ BTEMP_THERMAL_HIGH_LIMIT_52;
+ break;
+ case BTEMP_HIGH_TH_62:
+ di->btemp_ranges.btemp_high_limit =
+ BTEMP_THERMAL_HIGH_LIMIT_62;
+ break;
+ }
+
+ /* Register BTEMP power supply class */
+ ret = power_supply_register(di->dev, &di->btemp_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register BTEMP psy\n");
+ goto free_btemp_wq;
+ }
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab8500_btemp_irq[i].name, di);
+
+ if (ret) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ , ab8500_btemp_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab8500_btemp_irq[i].name, irq, ret);
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ /* Kick off periodic temperature measurements */
+ ab8500_btemp_periodic(di, true);
+ list_add_tail(&di->node, &ab8500_btemp_list);
+
+ return ret;
+
+free_irq:
+ power_supply_unregister(&di->btemp_psy);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ free_irq(irq, di);
+ }
+free_btemp_wq:
+ destroy_workqueue(di->btemp_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab8500_btemp_driver = {
+ .probe = ab8500_btemp_probe,
+ .remove = __devexit_p(ab8500_btemp_remove),
+ .suspend = ab8500_btemp_suspend,
+ .resume = ab8500_btemp_resume,
+ .driver = {
+ .name = "ab8500-btemp",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_btemp_init(void)
+{
+ return platform_driver_register(&ab8500_btemp_driver);
+}
+
+static void __exit ab8500_btemp_exit(void)
+{
+ platform_driver_unregister(&ab8500_btemp_driver);
+}
+
+subsys_initcall_sync(ab8500_btemp_init);
+module_exit(ab8500_btemp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski, Arun R Murthy");
+MODULE_ALIAS("platform:ab8500-btemp");
+MODULE_DESCRIPTION("AB8500 battery temperature driver");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
new file mode 100644
index 000000000000..e2b4accbec88
--- /dev/null
+++ b/drivers/power/ab8500_charger.c
@@ -0,0 +1,2789 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Charger driver for AB8500
+ *
+ * License Terms: GNU General Public License v2
+ * Author:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ * Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/mfd/abx500/ux500_chargalg.h>
+#include <linux/usb/otg.h>
+
+/* Charger constants */
+#define NO_PW_CONN 0
+#define AC_PW_CONN 1
+#define USB_PW_CONN 2
+
+#define MAIN_WDOG_ENA 0x01
+#define MAIN_WDOG_KICK 0x02
+#define MAIN_WDOG_DIS 0x00
+#define CHARG_WD_KICK 0x01
+#define MAIN_CH_ENA 0x01
+#define MAIN_CH_NO_OVERSHOOT_ENA_N 0x02
+#define USB_CH_ENA 0x01
+#define USB_CHG_NO_OVERSHOOT_ENA_N 0x02
+#define MAIN_CH_DET 0x01
+#define MAIN_CH_CV_ON 0x04
+#define USB_CH_CV_ON 0x08
+#define VBUS_DET_DBNC100 0x02
+#define VBUS_DET_DBNC1 0x01
+#define OTP_ENABLE_WD 0x01
+
+#define MAIN_CH_INPUT_CURR_SHIFT 4
+#define VBUS_IN_CURR_LIM_SHIFT 4
+
+#define LED_INDICATOR_PWM_ENA 0x01
+#define LED_INDICATOR_PWM_DIS 0x00
+#define LED_IND_CUR_5MA 0x04
+#define LED_INDICATOR_PWM_DUTY_252_256 0xBF
+
+/* HW failure constants */
+#define MAIN_CH_TH_PROT 0x02
+#define VBUS_CH_NOK 0x08
+#define USB_CH_TH_PROT 0x02
+#define VBUS_OVV_TH 0x01
+#define MAIN_CH_NOK 0x01
+#define VBUS_DET 0x80
+
+/* UsbLineStatus register bit masks */
+#define AB8500_USB_LINK_STATUS 0x78
+#define AB8500_STD_HOST_SUSP 0x18
+
+/* Watchdog timeout constant */
+#define WD_TIMER 0x30 /* 4min */
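+/* Kick the watchdog well within the 4 minute HW timeout */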
+#define WD_KICK_INTERVAL (60 * HZ)
+
+/* Lowest charger voltage is 3.39V -> 0x4E */
+#define LOW_VOLT_REG 0x4E
+
+/* UsbLineStatus register - usb types */
+enum ab8500_charger_link_status {
+ USB_STAT_NOT_CONFIGURED,
+ USB_STAT_STD_HOST_NC,
+ USB_STAT_STD_HOST_C_NS,
+ USB_STAT_STD_HOST_C_S,
+ USB_STAT_HOST_CHG_NM,
+ USB_STAT_HOST_CHG_HS,
+ USB_STAT_HOST_CHG_HS_CHIRP,
+ USB_STAT_DEDICATED_CHG,
+ USB_STAT_ACA_RID_A,
+ USB_STAT_ACA_RID_B,
+ USB_STAT_ACA_RID_C_NM,
+ USB_STAT_ACA_RID_C_HS,
+ USB_STAT_ACA_RID_C_HS_CHIRP,
+ USB_STAT_HM_IDGND,
+ USB_STAT_RESERVED,
+ USB_STAT_NOT_VALID_LINK,
+};
+
+enum ab8500_usb_state {
+ AB8500_BM_USB_STATE_RESET_HS, /* HighSpeed Reset */
+ AB8500_BM_USB_STATE_RESET_FS, /* FullSpeed/LowSpeed Reset */
+ AB8500_BM_USB_STATE_CONFIGURED,
+ AB8500_BM_USB_STATE_SUSPEND,
+ AB8500_BM_USB_STATE_RESUME,
+ AB8500_BM_USB_STATE_MAX,
+};
+
+/* VBUS input current limits supported in AB8500 in mA */
+#define USB_CH_IP_CUR_LVL_0P05 50
+#define USB_CH_IP_CUR_LVL_0P09 98
+#define USB_CH_IP_CUR_LVL_0P19 193
+#define USB_CH_IP_CUR_LVL_0P29 290
+#define USB_CH_IP_CUR_LVL_0P38 380
+#define USB_CH_IP_CUR_LVL_0P45 450
+#define USB_CH_IP_CUR_LVL_0P5 500
+#define USB_CH_IP_CUR_LVL_0P6 600
+#define USB_CH_IP_CUR_LVL_0P7 700
+#define USB_CH_IP_CUR_LVL_0P8 800
+#define USB_CH_IP_CUR_LVL_0P9 900
+#define USB_CH_IP_CUR_LVL_1P0 1000
+#define USB_CH_IP_CUR_LVL_1P1 1100
+#define USB_CH_IP_CUR_LVL_1P3 1300
+#define USB_CH_IP_CUR_LVL_1P4 1400
+#define USB_CH_IP_CUR_LVL_1P5 1500
+
+#define VBAT_TRESH_IP_CUR_RED 3800
+
+#define to_ab8500_charger_usb_device_info(x) container_of((x), \
+ struct ab8500_charger, usb_chg)
+#define to_ab8500_charger_ac_device_info(x) container_of((x), \
+ struct ab8500_charger, ac_chg)
+
+/**
+ * struct ab8500_charger_interrupts - ab8500 interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab8500_charger_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab8500_charger_info {
+ int charger_connected;
+ int charger_online;
+ int charger_voltage;
+ int cv_active;
+ bool wd_expired;
+};
+
+struct ab8500_charger_event_flags {
+ bool mainextchnotok;
+ bool main_thermal_prot;
+ bool usb_thermal_prot;
+ bool vbus_ovv;
+ bool usbchargernotok;
+ bool chgwdexp;
+ bool vbus_collapse;
+};
+
+struct ab8500_charger_usb_state {
+ bool usb_changed;
+ int usb_current;
+ enum ab8500_usb_state state;
+ spinlock_t usb_lock;
+};
+
+/**
+ * struct ab8500_charger - ab8500 Charger device information
+ * @dev: Pointer to the structure device
+ * @max_usb_in_curr: Max USB charger input current
+ * @vbus_detected: VBUS detected
+ * @vbus_detected_start:
+ * VBUS detected during startup
+ * @ac_conn: This will be true when the AC charger has been plugged
+ * @vddadc_en_ac: Indicate if VDD ADC supply is enabled because AC
+ * charger is enabled
+ * @vddadc_en_usb: Indicate if VDD ADC supply is enabled because USB
+ * charger is enabled
+ * @vbat: Battery voltage
+ * @old_vbat: Previously measured battery voltage
+ * @autopower: Indicate if we should have automatic pwron after pwrloss
+ * @parent: Pointer to the struct ab8500
+ * @gpadc: Pointer to the struct gpadc
+ * @pdata: Pointer to the abx500_charger platform data
+ * @bat: Pointer to the abx500_bm platform data
+ * @flags: Structure for information about events triggered
+ * @usb_state: Structure for usb stack information
+ * @ac_chg: AC charger power supply
+ * @usb_chg: USB charger power supply
+ * @ac: Structure that holds the AC charger properties
+ * @usb: Structure that holds the USB charger properties
+ * @regu: Pointer to the struct regulator
+ * @charger_wq: Work queue for the IRQs and checking HW state
+ * @check_vbat_work: Work for checking vbat threshold to adjust vbus current
+ * @check_hw_failure_work: Work for checking HW state
+ * @check_usbchgnotok_work: Work for checking USB charger not ok status
+ * @kick_wd_work: Work for kicking the charger watchdog in case
+ * of ABB rev 1.* due to the watchdog logic bug
+ * @ac_work: Work for checking AC charger connection
+ * @detect_usb_type_work: Work for detecting the USB type connected
+ * @usb_link_status_work: Work for checking the new USB link status
+ * @usb_state_changed_work: Work for checking USB state
+ * @check_main_thermal_prot_work:
+ * Work for checking Main thermal status
+ * @check_usb_thermal_prot_work:
+ * Work for checking USB thermal status
+ * @usb_phy: Pointer to the struct usb_phy
+ * @nb: USB notifier block
+ */
+struct ab8500_charger {
+ struct device *dev;
+ int max_usb_in_curr;
+ bool vbus_detected;
+ bool vbus_detected_start;
+ bool ac_conn;
+ bool vddadc_en_ac;
+ bool vddadc_en_usb;
+ int vbat;
+ int old_vbat;
+ bool autopower;
+ struct ab8500 *parent;
+ struct ab8500_gpadc *gpadc;
+ struct abx500_charger_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct ab8500_charger_event_flags flags;
+ struct ab8500_charger_usb_state usb_state;
+ struct ux500_charger ac_chg;
+ struct ux500_charger usb_chg;
+ struct ab8500_charger_info ac;
+ struct ab8500_charger_info usb;
+ struct regulator *regu;
+ struct workqueue_struct *charger_wq;
+ struct delayed_work check_vbat_work;
+ struct delayed_work check_hw_failure_work;
+ struct delayed_work check_usbchgnotok_work;
+ struct delayed_work kick_wd_work;
+ struct work_struct ac_work;
+ struct work_struct detect_usb_type_work;
+ struct work_struct usb_link_status_work;
+ struct work_struct usb_state_changed_work;
+ struct work_struct check_main_thermal_prot_work;
+ struct work_struct check_usb_thermal_prot_work;
+ struct usb_phy *usb_phy;
+ struct notifier_block nb;
+};
+
+/* AC properties */
+static enum power_supply_property ab8500_charger_ac_props[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+/* USB properties */
+static enum power_supply_property ab8500_charger_usb_props[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+/**
+ * ab8500_power_loss_handling - set how we handle power loss.
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Magic numbers are from STE HW department.
+ */
+static void ab8500_power_loss_handling(struct ab8500_charger *di)
+{
+ u8 reg;
+ int ret;
+
+ dev_dbg(di->dev, "Autopower : %d\n", di->autopower);
+
+ /* read the autopower register */
+ ret = abx500_get_register_interruptible(di->dev, 0x15, 0x00, &reg);
+ if (ret) {
+ dev_err(di->dev, "%d write failed\n", __LINE__);
+ return;
+ }
+
+ /* enable the OTP emulation registers */
+ ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x2);
+ if (ret) {
+ dev_err(di->dev, "%d write failed\n", __LINE__);
+ return;
+ }
+
+ if (di->autopower)
+ reg |= 0x8;
+ else
+ reg &= ~0x8;
+
+ /* write back the changed value to autopower reg */
+ ret = abx500_set_register_interruptible(di->dev, 0x15, 0x00, reg);
+ if (ret) {
+ dev_err(di->dev, "%d write failed\n", __LINE__);
+ return;
+ }
+
+ /* disable the set OTP registers again */
+ ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x0);
+ if (ret) {
+ dev_err(di->dev, "%d write failed\n", __LINE__);
+ return;
+ }
+}
+
+/**
+ * ab8500_power_supply_changed - a wrapper with local extensions for
+ * power_supply_changed
+ * @di: pointer to the ab8500_charger structure
+ * @psy: pointer to the power_supply that has changed.
+ *
+ */
+static void ab8500_power_supply_changed(struct ab8500_charger *di,
+ struct power_supply *psy)
+{
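+ /*
+ * If configured, toggle automatic power-on after power loss
+ * depending on whether any charger is connected
+ */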
+ if (di->pdata->autopower_cfg) {
+ if (!di->usb.charger_connected &&
+ !di->ac.charger_connected &&
+ di->autopower) {
+ di->autopower = false;
+ ab8500_power_loss_handling(di);
+ } else if (!di->autopower &&
+ (di->ac.charger_connected ||
+ di->usb.charger_connected)) {
+ di->autopower = true;
+ ab8500_power_loss_handling(di);
+ }
+ }
+ power_supply_changed(psy);
+}
+
+static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
+ bool connected)
+{
+ if (connected != di->usb.charger_connected) {
+ dev_dbg(di->dev, "USB connected:%i\n", connected);
+ di->usb.charger_connected = connected;
+ sysfs_notify(&di->usb_chg.psy.dev->kobj, NULL, "present");
+ }
+}
+
+/**
+ * ab8500_charger_get_ac_voltage() - get ac charger voltage
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Returns ac charger voltage (on success)
+ */
+static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di)
+{
+ int vch;
+
+ /* Only measure voltage if the charger is connected */
+ if (di->ac.charger_connected) {
+ vch = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_V);
+ if (vch < 0)
+ dev_err(di->dev, "%s gpadc conv failed,\n", __func__);
+ } else {
+ vch = 0;
+ }
+ return vch;
+}
+
+/**
+ * ab8500_charger_ac_cv() - check if the main charger is in CV mode
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Returns ac charger CV mode (on success) else error code
+ */
+static int ab8500_charger_ac_cv(struct ab8500_charger *di)
+{
+ u8 val;
+ int ret = 0;
+
+ /* Only check CV mode if the charger is online */
+ if (di->ac.charger_online) {
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_STATUS1_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return 0;
+ }
+
+ if (val & MAIN_CH_CV_ON)
+ ret = 1;
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_get_vbus_voltage() - get vbus voltage
+ * @di: pointer to the ab8500_charger structure
+ *
+ * This function returns the vbus voltage.
+ * Returns vbus voltage (on success)
+ */
+static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
+{
+ int vch;
+
+ /* Only measure voltage if the charger is connected */
+ if (di->usb.charger_connected) {
+ vch = ab8500_gpadc_convert(di->gpadc, VBUS_V);
+ if (vch < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ vch = 0;
+ }
+ return vch;
+}
+
+/**
+ * ab8500_charger_get_usb_current() - get usb charger current
+ * @di: pointer to the ab8500_charger structure
+ *
+ * This function returns the usb charger current.
+ * Returns usb current (on success) and error code on failure
+ */
+static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
+{
+ int ich;
+
+ /* Only measure current if the charger is online */
+ if (di->usb.charger_online) {
+ ich = ab8500_gpadc_convert(di->gpadc, USB_CHARGER_C);
+ if (ich < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ ich = 0;
+ }
+ return ich;
+}
+
+/**
+ * ab8500_charger_get_ac_current() - get ac charger current
+ * @di: pointer to the ab8500_charger structure
+ *
+ * This function returns the ac charger current.
+ * Returns ac current (on success) and error code on failure.
+ */
+static int ab8500_charger_get_ac_current(struct ab8500_charger *di)
+{
+ int ich;
+
+ /* Only measure current if the charger is online */
+ if (di->ac.charger_online) {
+ ich = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_C);
+ if (ich < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ ich = 0;
+ }
+ return ich;
+}
+
+/**
+ * ab8500_charger_usb_cv() - check if the usb charger is in CV mode
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Returns usb charger CV mode (on success) else error code
+ */
+static int ab8500_charger_usb_cv(struct ab8500_charger *di)
+{
+ int ret;
+ u8 val;
+
+ /* Only check CV mode if the charger is online */
+ if (di->usb.charger_online) {
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_USBCH_STAT1_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return 0;
+ }
+
+ if (val & USB_CH_CV_ON)
+ ret = 1;
+ else
+ ret = 0;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_detect_chargers() - Detect the connected chargers
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Returns the type of charger connected.
+ * For USB this does not necessarily mean that we can charge from it,
+ * only that a USB cable is connected and has to be identified. This is
+ * used during startup, when we don't get interrupts from the charger
+ * detection.
+ *
+ * Returns an integer value, that means,
+ * NO_PW_CONN no power supply is connected
+ * AC_PW_CONN if the AC power supply is connected
+ * USB_PW_CONN if the USB power supply is connected
+ * AC_PW_CONN + USB_PW_CONN if USB and AC power supplies are both connected
+ */
+static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
+{
+ int result = NO_PW_CONN;
+ int ret;
+ u8 val;
+
+ /* Check for AC charger */
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_STATUS1_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+
+ if (val & MAIN_CH_DET)
+ result = AC_PW_CONN;
+
+ /* Check for USB charger */
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_USBCH_STAT1_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+
+ if ((val & VBUS_DET_DBNC1) && (val & VBUS_DET_DBNC100))
+ result |= USB_PW_CONN;
+
+ return result;
+}
+
+/**
+ * ab8500_charger_max_usb_curr() - get the max curr for the USB type
+ * @di: pointer to the ab8500_charger structure
+ * @link_status: the identified USB type
+ *
+ * Get the maximum current that is allowed to be drawn from the host
+ * based on the USB type.
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
+ enum ab8500_charger_link_status link_status)
+{
+ int ret = 0;
+
+ switch (link_status) {
+ case USB_STAT_STD_HOST_NC:
+ case USB_STAT_STD_HOST_C_NS:
+ case USB_STAT_STD_HOST_C_S:
+ dev_dbg(di->dev, "USB Type - Standard host is "
+ "detected through USB driver\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+ break;
+ case USB_STAT_HOST_CHG_HS_CHIRP:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+ case USB_STAT_HOST_CHG_HS:
+ case USB_STAT_ACA_RID_C_HS:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
+ break;
+ case USB_STAT_ACA_RID_A:
+ /*
+ * Dedicated charger level minus maximum current accessory
+ * can consume (300mA). Closest level is 1100mA
+ */
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P1;
+ break;
+ case USB_STAT_ACA_RID_B:
+ /*
+ * Dedicated charger level minus 120mA (20mA for ACA and
+ * 100mA for potential accessory). Closest level is 1300mA
+ */
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
+ break;
+ case USB_STAT_DEDICATED_CHG:
+ case USB_STAT_HOST_CHG_NM:
+ case USB_STAT_ACA_RID_C_HS_CHIRP:
+ case USB_STAT_ACA_RID_C_NM:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+ break;
+ case USB_STAT_RESERVED:
+ /*
+ * This state is used to indicate that VBUS has dropped below
+ * the detection level 4 times in a row. This is due to the
+ * charger output current being set too high, making the charger
+ * voltage collapse. This has to be propagated through to
+ * chargalg. This is done using the property
+ * POWER_SUPPLY_PROP_CURRENT_AVG = 1
+ */
+ di->flags.vbus_collapse = true;
+ dev_dbg(di->dev, "USB Type - USB_STAT_RESERVED "
+ "VBUS has collapsed\n");
+ ret = -1;
+ break;
+ case USB_STAT_HM_IDGND:
+ case USB_STAT_NOT_CONFIGURED:
+ case USB_STAT_NOT_VALID_LINK:
+ dev_err(di->dev, "USB Type - Charging not allowed\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ ret = -ENXIO;
+ break;
+ default:
+ dev_err(di->dev, "USB Type - Unknown\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ ret = -ENXIO;
+ break;
+ }
+
+ dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
+ link_status, di->max_usb_in_curr);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_read_usb_type() - read the type of usb connected
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Detect the type of the plugged USB
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_charger_read_usb_type(struct ab8500_charger *di)
+{
+ int ret;
+ u8 val;
+
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+ ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+ AB8500_USB_LINE_STAT_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+
+ /* get the USB type */
+ val = (val & AB8500_USB_LINK_STATUS) >> 3;
+ ret = ab8500_charger_max_usb_curr(di,
+ (enum ab8500_charger_link_status) val);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_detect_usb_type() - get the type of usb connected
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Detect the type of the plugged USB
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_charger_detect_usb_type(struct ab8500_charger *di)
+{
+ int i, ret;
+ u8 val;
+
+ /*
+ * On getting the VBUS rising edge detect interrupt there
+ * is a 250ms delay after which the register UsbLineStatus
+ * is filled with valid data.
+ */
+ for (i = 0; i < 10; i++) {
+ msleep(250);
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG,
+ &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+ ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+ AB8500_USB_LINE_STAT_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+ /*
+ * The UsbLineStatus register is not updated until the
+ * IT source register has been read, hence the read above.
+ * Revisit this.
+ */
+
+ /* get the USB type */
+ val = (val & AB8500_USB_LINK_STATUS) >> 3;
+ if (val)
+ break;
+ }
+ ret = ab8500_charger_max_usb_curr(di,
+ (enum ab8500_charger_link_status) val);
+
+ return ret;
+}
+
+/*
+ * This array maps the raw hex value to charger voltage used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_charger_voltage_map[] = {
+ 3500, 3525, 3550, 3575, 3600, 3625, 3650, 3675,
+ 3700, 3725, 3750, 3775, 3800, 3825, 3850, 3875,
+ 3900, 3925, 3950, 3975, 4000, 4025, 4050, 4060,
+ 4070, 4080, 4090, 4100, 4110, 4120, 4130, 4140,
+ 4150, 4160, 4170, 4180, 4190, 4200, 4210, 4220,
+ 4230, 4240, 4250, 4260, 4270, 4280, 4290, 4300,
+ 4310, 4320, 4330, 4340, 4350, 4360, 4370, 4380,
+ 4390, 4400, 4410, 4420, 4430, 4440, 4450, 4460,
+ 4470, 4480, 4490, 4500, 4510, 4520, 4530, 4540,
+ 4550, 4560, 4570, 4580, 4590, 4600,
+};
+
+/*
+ * This array maps the raw hex value to charger current used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_charger_current_map[] = {
+ 100, 200, 300, 400, 500,
+ 600, 700, 800, 900, 1000,
+ 1100, 1200, 1300, 1400, 1500,
+};
+
+/*
+ * This array maps the raw hex value to VBUS input current used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_charger_vbus_in_curr_map[] = {
+ USB_CH_IP_CUR_LVL_0P05,
+ USB_CH_IP_CUR_LVL_0P09,
+ USB_CH_IP_CUR_LVL_0P19,
+ USB_CH_IP_CUR_LVL_0P29,
+ USB_CH_IP_CUR_LVL_0P38,
+ USB_CH_IP_CUR_LVL_0P45,
+ USB_CH_IP_CUR_LVL_0P5,
+ USB_CH_IP_CUR_LVL_0P6,
+ USB_CH_IP_CUR_LVL_0P7,
+ USB_CH_IP_CUR_LVL_0P8,
+ USB_CH_IP_CUR_LVL_0P9,
+ USB_CH_IP_CUR_LVL_1P0,
+ USB_CH_IP_CUR_LVL_1P1,
+ USB_CH_IP_CUR_LVL_1P3,
+ USB_CH_IP_CUR_LVL_1P4,
+ USB_CH_IP_CUR_LVL_1P5,
+};
+
+static int ab8500_voltage_to_regval(int voltage)
+{
+ int i;
+
+ /* Special case for voltage below 3.5V */
+ if (voltage < ab8500_charger_voltage_map[0])
+ return LOW_VOLT_REG;
+
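+ /* Return the index of the last map entry not above the requested voltage */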
+ for (i = 1; i < ARRAY_SIZE(ab8500_charger_voltage_map); i++) {
+ if (voltage < ab8500_charger_voltage_map[i])
+ return i - 1;
+ }
+
+ /* If it matches the last element, return its index, otherwise error */
+ i = ARRAY_SIZE(ab8500_charger_voltage_map) - 1;
+ if (voltage == ab8500_charger_voltage_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab8500_current_to_regval(int curr)
+{
+ int i;
+
+ if (curr < ab8500_charger_current_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_current_map); i++) {
+ if (curr < ab8500_charger_current_map[i])
+ return i - 1;
+ }
+
+ /* If it matches the last element, return its index, otherwise error */
+ i = ARRAY_SIZE(ab8500_charger_current_map) - 1;
+ if (curr == ab8500_charger_current_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab8500_vbus_in_curr_to_regval(int curr)
+{
+ int i;
+
+ if (curr < ab8500_charger_vbus_in_curr_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_vbus_in_curr_map); i++) {
+ if (curr < ab8500_charger_vbus_in_curr_map[i])
+ return i - 1;
+ }
+
+ /* If it matches the last element, return its index, otherwise error */
+ i = ARRAY_SIZE(ab8500_charger_vbus_in_curr_map) - 1;
+ if (curr == ab8500_charger_vbus_in_curr_map[i])
+ return i;
+ else
+ return -1;
+}
+
+/**
+ * ab8500_charger_get_usb_cur() - get usb current
+ * @di: pointer to the ab8500_charger structure
+ *
+ * The usb stack provides the maximum current that can be drawn from
+ * the standard usb host. This will be in mA.
+ * This function converts current in mA to a value that can be written
+ * to the register. Returns -1 if charging is not allowed
+ */
+static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
+{
+ switch (di->usb_state.usb_current) {
+ case 100:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+ break;
+ case 200:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P19;
+ break;
+ case 300:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P29;
+ break;
+ case 400:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P38;
+ break;
+ case 500:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+ default:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ return -1;
+ break;
+ }
+ return 0;
+}
+
+/**
+ * ab8500_charger_set_vbus_in_curr() - set VBUS input current limit
+ * @di: pointer to the ab8500_charger structure
+ * @ich_in: charger input current limit
+ *
+ * Sets the current that can be drawn from the USB host
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
+ int ich_in)
+{
+ int ret;
+ int input_curr_index;
+ int min_value;
+
+ /* We should always use the lowest current limit */
+ min_value = min(di->bat->chg_params->usb_curr_max, ich_in);
+
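+ /* Step the 100mA and 500mA levels down one step when VBAT is below the threshold */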
+ switch (min_value) {
+ case 100:
+ if (di->vbat < VBAT_TRESH_IP_CUR_RED)
+ min_value = USB_CH_IP_CUR_LVL_0P05;
+ break;
+ case 500:
+ if (di->vbat < VBAT_TRESH_IP_CUR_RED)
+ min_value = USB_CH_IP_CUR_LVL_0P45;
+ break;
+ default:
+ break;
+ }
+
+ input_curr_index = ab8500_vbus_in_curr_to_regval(min_value);
+ if (input_curr_index < 0) {
+ dev_err(di->dev, "VBUS input current limit too high\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_USBCH_IPT_CRNTLVL_REG,
+ input_curr_index << VBUS_IN_CURR_LIM_SHIFT);
+ if (ret)
+ dev_err(di->dev, "%s write failed\n", __func__);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_led_en() - turn on/off charging led
+ * @di: pointer to the ab8500_charger structure
+ * @on: flag to turn on/off the charging led
+ *
+ * Power ON/OFF charging LED indication
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_led_en(struct ab8500_charger *di, int on)
+{
+ int ret;
+
+ if (on) {
+ /* Power ON charging LED indicator, set LED current to 5mA */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_LED_INDICATOR_PWM_CTRL,
+ (LED_IND_CUR_5MA | LED_INDICATOR_PWM_ENA));
+ if (ret) {
+ dev_err(di->dev, "Power ON LED failed\n");
+ return ret;
+ }
+ /* LED indicator PWM duty cycle 252/256 */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_LED_INDICATOR_PWM_DUTY,
+ LED_INDICATOR_PWM_DUTY_252_256);
+ if (ret) {
+ dev_err(di->dev, "Set LED PWM duty cycle failed\n");
+ return ret;
+ }
+ } else {
+ /* Power off charging LED indicator */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_LED_INDICATOR_PWM_CTRL,
+ LED_INDICATOR_PWM_DIS);
+ if (ret) {
+ dev_err(di->dev, "Power-off LED failed\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_ac_en() - enable or disable ac charging
+ * @charger: pointer to the ux500_charger structure
+ * @enable: enable/disable flag
+ * @vset: charging voltage
+ * @iset: charging current
+ *
+ * Enable/Disable AC/Mains charging and turn the charging LED
+ * on/off accordingly.
+ */
+static int ab8500_charger_ac_en(struct ux500_charger *charger,
+ int enable, int vset, int iset)
+{
+ int ret;
+ int volt_index;
+ int curr_index;
+ int input_curr_index;
+ u8 overshoot = 0;
+
+ struct ab8500_charger *di = to_ab8500_charger_ac_device_info(charger);
+
+ if (enable) {
+ /* Check if AC is connected */
+ if (!di->ac.charger_connected) {
+ dev_err(di->dev, "AC charger not connected\n");
+ return -ENXIO;
+ }
+
+ /* Enable AC charging */
+ dev_dbg(di->dev, "Enable AC: %dmV %dmA\n", vset, iset);
+
+ /*
+ * Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
+ * will be triggered every time we enable the VDD ADC supply.
+ * This will turn off charging for a short while.
+ * It can be avoided by having the supply on when
+ * there is a charger enabled. Normally the VDD ADC supply
+ * is enabled every time a GPADC conversion is triggered. We will
+ * force it to be enabled from this driver to have
+ * the GPADC module independent of the AB8500 chargers
+ */
+ if (!di->vddadc_en_ac) {
+ regulator_enable(di->regu);
+ di->vddadc_en_ac = true;
+ }
+
+ /* Check if the requested voltage or current is valid */
+ volt_index = ab8500_voltage_to_regval(vset);
+ curr_index = ab8500_current_to_regval(iset);
+ input_curr_index = ab8500_current_to_regval(
+ di->bat->chg_params->ac_curr_max);
+ if (volt_index < 0 || curr_index < 0 || input_curr_index < 0) {
+ dev_err(di->dev,
+ "Charger voltage or current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ /* ChVoltLevel: maximum battery charging voltage */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_VOLT_LVL_REG, (u8) volt_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ /* MainChInputCurr: current that can be drawn from the charger */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_MCH_IPT_CURLVL_REG,
+ input_curr_index << MAIN_CH_INPUT_CURR_SHIFT);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ /* ChOutputCurentLevel: protected output current */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ /* Check if VBAT overshoot control should be enabled */
+ if (!di->bat->enable_overshoot)
+ overshoot = MAIN_CH_NO_OVERSHOOT_ENA_N;
+
+ /* Enable Main Charger */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_MCH_CTRL1, MAIN_CH_ENA | overshoot);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ /* Power on charging LED indication */
+ ret = ab8500_charger_led_en(di, true);
+ if (ret < 0)
+ dev_err(di->dev, "failed to enable LED\n");
+
+ di->ac.charger_online = 1;
+ } else {
+ /* Disable AC charging */
+ if (is_ab8500_1p1_or_earlier(di->parent)) {
+ /*
+ * For ABB revision 1.0 and 1.1 there is a bug in the
+ * watchdog logic. That means we have to continuously
+ * kick the charger watchdog even when no charger is
+ * connected. This is only valid once the AC charger
+ * has been enabled. This is a bug that is not handled
+ * by the algorithm and the watchdog has to be kicked
+ * by the charger driver when the AC charger
+ * is disabled
+ */
+ if (di->ac_conn) {
+ queue_delayed_work(di->charger_wq,
+ &di->kick_wd_work,
+ round_jiffies(WD_KICK_INTERVAL));
+ }
+
+ /*
+ * We can't turn off charging completely
+ * due to a bug in AB8500 cut1.
+ * If we do, charging will not start again.
+ * That is why we set the lowest voltage
+ * and current possible
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_VOLT_LVL_REG, CH_VOL_LVL_3P5);
+ if (ret) {
+ dev_err(di->dev,
+ "%s write failed\n", __func__);
+ return ret;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_REG, CH_OP_CUR_LVL_0P1);
+ if (ret) {
+ dev_err(di->dev,
+ "%s write failed\n", __func__);
+ return ret;
+ }
+ } else {
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_MCH_CTRL1, 0);
+ if (ret) {
+ dev_err(di->dev,
+ "%s write failed\n", __func__);
+ return ret;
+ }
+ }
+
+ ret = ab8500_charger_led_en(di, false);
+ if (ret < 0)
+ dev_err(di->dev, "failed to disable LED\n");
+
+ di->ac.charger_online = 0;
+ di->ac.wd_expired = false;
+
+ /* Disable regulator if enabled */
+ if (di->vddadc_en_ac) {
+ regulator_disable(di->regu);
+ di->vddadc_en_ac = false;
+ }
+
+ dev_dbg(di->dev, "%s Disabled AC charging\n", __func__);
+ }
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_usb_en() - enable usb charging
+ * @charger: pointer to the ux500_charger structure
+ * @enable: enable/disable flag
+ * @vset: charging voltage
+ * @ich_out: charger output current
+ *
+ * Enable/Disable USB charging and turn the charging LED on/off accordingly.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_usb_en(struct ux500_charger *charger,
+ int enable, int vset, int ich_out)
+{
+ int ret;
+ int volt_index;
+ int curr_index;
+ u8 overshoot = 0;
+
+ struct ab8500_charger *di = to_ab8500_charger_usb_device_info(charger);
+
+ if (enable) {
+ /* Check if USB is connected */
+ if (!di->usb.charger_connected) {
+ dev_err(di->dev, "USB charger not connected\n");
+ return -ENXIO;
+ }
+
+ /*
+ * Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
+ * will be triggered every time we enable the VDD ADC supply.
+ * This will turn off charging for a short while.
+ * It can be avoided by having the supply on when
+ * there is a charger enabled. Normally the VDD ADC supply
+ * is enabled every time a GPADC conversion is triggered. We will
+ * force it to be enabled from this driver to have
+ * the GPADC module independent of the AB8500 chargers
+ */
+ if (!di->vddadc_en_usb) {
+ regulator_enable(di->regu);
+ di->vddadc_en_usb = true;
+ }
+
+ /* Enable USB charging */
+ dev_dbg(di->dev, "Enable USB: %dmV %dmA\n", vset, ich_out);
+
+ /* Check if the requested voltage or current is valid */
+ volt_index = ab8500_voltage_to_regval(vset);
+ curr_index = ab8500_current_to_regval(ich_out);
+ if (volt_index < 0 || curr_index < 0) {
+ dev_err(di->dev,
+ "Charger voltage or current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ /* ChVoltLevel: max voltage up to which the battery can be charged */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_VOLT_LVL_REG, (u8) volt_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ /* USBChInputCurr: current that can be drawn from the usb */
+ ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+ if (ret) {
+ dev_err(di->dev, "setting USBChInputCurr failed\n");
+ return ret;
+ }
+ /* ChOutputCurentLevel: protected output current */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ /* Check if VBAT overshoot control should be enabled */
+ if (!di->bat->enable_overshoot)
+ overshoot = USB_CHG_NO_OVERSHOOT_ENA_N;
+
+ /* Enable USB Charger */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_USBCH_CTRL1_REG, USB_CH_ENA | overshoot);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ /* If successful, power on the charging LED indication */
+ ret = ab8500_charger_led_en(di, true);
+ if (ret < 0)
+ dev_err(di->dev, "failed to enable LED\n");
+
+ queue_delayed_work(di->charger_wq, &di->check_vbat_work, HZ);
+
+ di->usb.charger_online = 1;
+ } else {
+ /* Disable USB charging */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_USBCH_CTRL1_REG, 0);
+ if (ret) {
+ dev_err(di->dev,
+ "%s write failed\n", __func__);
+ return ret;
+ }
+
+ ret = ab8500_charger_led_en(di, false);
+ if (ret < 0)
+ dev_err(di->dev, "failed to disable LED\n");
+
+ di->usb.charger_online = 0;
+ di->usb.wd_expired = false;
+
+ /* Disable regulator if enabled */
+ if (di->vddadc_en_usb) {
+ regulator_disable(di->regu);
+ di->vddadc_en_usb = false;
+ }
+
+ dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
+
+ /* Cancel any pending Vbat check work */
+ if (delayed_work_pending(&di->check_vbat_work))
+ cancel_delayed_work(&di->check_vbat_work);
+
+ }
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_watchdog_kick() - kick charger watchdog
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Kick charger watchdog
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_watchdog_kick(struct ux500_charger *charger)
+{
+ int ret;
+ struct ab8500_charger *di;
+
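+ /* Resolve the ab8500_charger instance from the AC or USB ux500_charger */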
+ if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
+ di = to_ab8500_charger_ac_device_info(charger);
+ else if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+ di = to_ab8500_charger_usb_device_info(charger);
+ else
+ return -ENXIO;
+
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
+ if (ret)
+ dev_err(di->dev, "Failed to kick WD!\n");
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_update_charger_current() - update charger current
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Update the charger output current for the specified charger
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
+ int ich_out)
+{
+ int ret;
+ int curr_index;
+ struct ab8500_charger *di;
+
+ if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
+ di = to_ab8500_charger_ac_device_info(charger);
+ else if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+ di = to_ab8500_charger_usb_device_info(charger);
+ else
+ return -ENXIO;
+
+ curr_index = ab8500_current_to_regval(ich_out);
+ if (curr_index < 0) {
+ dev_err(di->dev,
+ "Charger current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ /* Reset the main and usb drop input current measurement counter */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CHARGER_CTRL,
+ 0x1);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ab8500_charger_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab8500_charger *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+ struct ux500_charger *usb_chg;
+
+ usb_chg = (struct ux500_charger *)data;
+ psy = &usb_chg->psy;
+
+ di = to_ab8500_charger_usb_device_info(usb_chg);
+
+ ext = dev_get_drvdata(dev);
+
+ /* For all psy where the driver name appears in any supplied_to */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->vbat = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab8500_charger_check_vbat_work() - keep vbus current within spec
+ * @work: pointer to the work_struct structure
+ *
+ * Due to an ASIC bug it is necessary to lower the input current to the vbus
+ * charger when charging at some specific current levels. This issue only
+ * applies below a certain battery voltage. This function makes sure that
+ * the allowed current limit isn't exceeded.
+ */
+static void ab8500_charger_check_vbat_work(struct work_struct *work)
+{
+ int t = 10;
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_vbat_work.work);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->usb_chg.psy, ab8500_charger_get_ext_psy_data);
+
+ /* First run old_vbat is 0. */
+ if (di->old_vbat == 0)
+ di->old_vbat = di->vbat;
+
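+ /* Re-apply the VBUS input current limit when VBAT crosses the threshold */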
+ if (!((di->old_vbat <= VBAT_TRESH_IP_CUR_RED &&
+ di->vbat <= VBAT_TRESH_IP_CUR_RED) ||
+ (di->old_vbat > VBAT_TRESH_IP_CUR_RED &&
+ di->vbat > VBAT_TRESH_IP_CUR_RED))) {
+
+ dev_dbg(di->dev, "Vbat did cross threshold, curr: %d, new: %d,"
+ " old: %d\n", di->max_usb_in_curr, di->vbat,
+ di->old_vbat);
+ ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+ power_supply_changed(&di->usb_chg.psy);
+ }
+
+ di->old_vbat = di->vbat;
+
+ /*
+ * No need to check the battery voltage every second when not close to
+ * the threshold.
+ */
+ if (di->vbat < (VBAT_TRESH_IP_CUR_RED + 100) &&
+ (di->vbat > (VBAT_TRESH_IP_CUR_RED - 100)))
+ t = 1;
+
+ queue_delayed_work(di->charger_wq, &di->check_vbat_work, t * HZ);
+}
+
+/**
+ * ab8500_charger_check_hw_failure_work() - check main charger failure
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the main charger status
+ */
+static void ab8500_charger_check_hw_failure_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_hw_failure_work.work);
+
+ /* Check if the status bits for HW failure are still active */
+ if (di->flags.mainextchnotok) {
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_STATUS2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if (!(reg_value & MAIN_CH_NOK)) {
+ di->flags.mainextchnotok = false;
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+ }
+ }
+ if (di->flags.vbus_ovv) {
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG,
+ &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if (!(reg_value & VBUS_OVV_TH)) {
+ di->flags.vbus_ovv = false;
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ }
+ }
+ /* If we still have a failure, schedule a new check */
+ if (di->flags.mainextchnotok || di->flags.vbus_ovv) {
+ queue_delayed_work(di->charger_wq,
+ &di->check_hw_failure_work, round_jiffies(HZ));
+ }
+}
+
+/**
+ * ab8500_charger_kick_watchdog_work() - kick the watchdog
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for kicking the charger watchdog.
+ *
+ * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
+ * logic. That means we have to continuously kick the charger
+ * watchdog even when no charger is connected. This is only
+ * valid once the AC charger has been enabled. This is
+ * a bug that is not handled by the algorithm and the
+ * watchdog has to be kicked by the charger driver
+ * when the AC charger is disabled
+ */
+static void ab8500_charger_kick_watchdog_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, kick_wd_work.work);
+
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
+ if (ret)
+ dev_err(di->dev, "Failed to kick WD!\n");
+
+ /* Schedule a new watchdog kick */
+ queue_delayed_work(di->charger_wq,
+ &di->kick_wd_work, round_jiffies(WD_KICK_INTERVAL));
+}
+
+/**
+ * ab8500_charger_ac_work() - work to get and set main charger status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the main charger status
+ */
+static void ab8500_charger_ac_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, ac_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check if the main charger is
+ * connected by reading the status register
+ */
+ ret = ab8500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (ret & AC_PW_CONN) {
+ di->ac.charger_connected = 1;
+ di->ac_conn = true;
+ } else {
+ di->ac.charger_connected = 0;
+ }
+
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+ sysfs_notify(&di->ac_chg.psy.dev->kobj, NULL, "present");
+}
+
+/**
+ * ab8500_charger_detect_usb_type_work() - work to detect USB type
+ * @work: Pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, detect_usb_type_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check if a charger is
+ * connected by reading the status register
+ */
+ ret = ab8500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (!(ret & USB_PW_CONN)) {
+ di->vbus_detected = 0;
+ ab8500_charger_set_usb_connected(di, false);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ } else {
+ di->vbus_detected = 1;
+
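+ /* ABB cut 1.x has no USB link status IRQ, so detect the USB type here */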
+ if (is_ab8500_1p1_or_earlier(di->parent)) {
+ ret = ab8500_charger_detect_usb_type(di);
+ if (!ret) {
+ ab8500_charger_set_usb_connected(di, true);
+ ab8500_power_supply_changed(di,
+ &di->usb_chg.psy);
+ }
+ } else {
+ /* For ABB cut2.0 and onwards we have an IRQ,
+ * USB_LINK_STATUS that will be triggered when the USB
+ * link status changes. The exception is USB connected
+ * during startup. Then we don't get a
+ * USB_LINK_STATUS IRQ
+ */
+ if (di->vbus_detected_start) {
+ di->vbus_detected_start = false;
+ ret = ab8500_charger_detect_usb_type(di);
+ if (!ret) {
+ ab8500_charger_set_usb_connected(di,
+ true);
+ ab8500_power_supply_changed(di,
+ &di->usb_chg.psy);
+ }
+ }
+ }
+ }
+}
+
+/**
+ * ab8500_charger_usb_link_status_work() - work to detect USB type
+ * @work: pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab8500_charger_usb_link_status_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, usb_link_status_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check if a charger is
+ * connected by reading the status register
+ */
+ ret = ab8500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (!(ret & USB_PW_CONN)) {
+ di->vbus_detected = 0;
+ ab8500_charger_set_usb_connected(di, false);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ } else {
+ di->vbus_detected = 1;
+ ret = ab8500_charger_read_usb_type(di);
+ if (!ret) {
+ /* Update maximum input current */
+ ret = ab8500_charger_set_vbus_in_curr(di,
+ di->max_usb_in_curr);
+ if (ret)
+ return;
+
+ ab8500_charger_set_usb_connected(di, true);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ } else if (ret == -ENXIO) {
+ /* No valid charger type detected */
+ ab8500_charger_set_usb_connected(di, false);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ }
+ }
+}
+
+static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
+{
+ int ret;
+ unsigned long flags;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, usb_state_changed_work);
+
+ if (!di->vbus_detected)
+ return;
+
+ spin_lock_irqsave(&di->usb_state.usb_lock, flags);
+ di->usb_state.usb_changed = false;
+ spin_unlock_irqrestore(&di->usb_state.usb_lock, flags);
+
+ /*
+ * wait for some time until updates arrive from the USB stack
+ * and the negotiations are completed
+ */
+ msleep(250);
+
+ if (di->usb_state.usb_changed)
+ return;
+
+ dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n",
+ __func__, di->usb_state.state, di->usb_state.usb_current);
+
+ switch (di->usb_state.state) {
+ case AB8500_BM_USB_STATE_RESET_HS:
+ case AB8500_BM_USB_STATE_RESET_FS:
+ case AB8500_BM_USB_STATE_SUSPEND:
+ case AB8500_BM_USB_STATE_MAX:
+ ab8500_charger_set_usb_connected(di, false);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ break;
+
+ case AB8500_BM_USB_STATE_RESUME:
+ /*
+ * when going from suspend to resume there should be a
+ * 1 s delay before enabling charging
+ */
+ msleep(1000);
+ /* Intentional fall through */
+ case AB8500_BM_USB_STATE_CONFIGURED:
+ /*
+ * USB is configured, enable charging with the charging
+ * input current obtained from USB driver
+ */
+ if (!ab8500_charger_get_usb_cur(di)) {
+ /* Update maximum input current */
+ ret = ab8500_charger_set_vbus_in_curr(di,
+ di->max_usb_in_curr);
+ if (ret)
+ return;
+
+ ab8500_charger_set_usb_connected(di, true);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ab8500_charger_check_usbchargernotok_work() - check USB chg not ok status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB charger Not OK status
+ */
+static void ab8500_charger_check_usbchargernotok_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+ bool prev_status;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_usbchgnotok_work.work);
+
+ /* Check if the status bit for usbchargernotok is still active */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ prev_status = di->flags.usbchargernotok;
+
+ if (reg_value & VBUS_CH_NOK) {
+ di->flags.usbchargernotok = true;
+ /* Check again in 1sec */
+ queue_delayed_work(di->charger_wq,
+ &di->check_usbchgnotok_work, HZ);
+ } else {
+ di->flags.usbchargernotok = false;
+ di->flags.vbus_collapse = false;
+ }
+
+ if (prev_status != di->flags.usbchargernotok)
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+}
+
+/**
+ * ab8500_charger_check_main_thermal_prot_work() - check main thermal status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the Main thermal prot status
+ */
+static void ab8500_charger_check_main_thermal_prot_work(
+ struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_main_thermal_prot_work);
+
+ /* Check if the status bit for main_thermal_prot is still active */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_STATUS2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if (reg_value & MAIN_CH_TH_PROT)
+ di->flags.main_thermal_prot = true;
+ else
+ di->flags.main_thermal_prot = false;
+
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+}
+
+/**
+ * ab8500_charger_check_usb_thermal_prot_work() - check usb thermal status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB thermal prot status
+ */
+static void ab8500_charger_check_usb_thermal_prot_work(
+ struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_usb_thermal_prot_work);
+
+ /* Check if the status bit for usb_thermal_prot is still active */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if (reg_value & USB_CH_TH_PROT)
+ di->flags.usb_thermal_prot = true;
+ else
+ di->flags.usb_thermal_prot = false;
+
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+}
+
+/**
+ * ab8500_charger_mainchunplugdet_handler() - main charger unplugged
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchunplugdet_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Main charger unplugged\n");
+ queue_work(di->charger_wq, &di->ac_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainchplugdet_handler() - main charger plugged
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchplugdet_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Main charger plugged\n");
+ queue_work(di->charger_wq, &di->ac_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainextchnotok_handler() - main charger not ok
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainextchnotok_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Main charger not ok\n");
+ di->flags.mainextchnotok = true;
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+
+ /* Schedule a new HW failure check */
+ queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainchthprotr_handler() - Die temp is above main charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchthprotr_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp above Main charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_main_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainchthprotf_handler() - Die temp is below main charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchthprotf_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp ok for Main charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_main_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_vbusdetf_handler() - VBUS falling detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbusdetf_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS falling detected\n");
+ queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_vbusdetr_handler() - VBUS rising detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbusdetr_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ di->vbus_detected = true;
+ dev_dbg(di->dev, "VBUS rising detected\n");
+ queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usblinkstatus_handler() - USB link status has changed
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usblinkstatus_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "USB link status changed\n");
+
+ queue_work(di->charger_wq, &di->usb_link_status_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usbchthprotr_handler() - Die temp is above usb charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usbchthprotr_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp above USB charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usbchthprotf_handler() - Die temp is below usb charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usbchthprotf_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp ok for USB charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usbchargernotokr_handler() - USB charger not ok detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usbchargernotokr_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Not allowed USB charger detected\n");
+ queue_delayed_work(di->charger_wq, &di->check_usbchgnotok_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_chwdexp_handler() - Charger watchdog expired
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_chwdexp_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Charger watchdog expired\n");
+
+ /*
+ * The charger that was online when the watchdog expired
+ * needs to be restarted for charging to start again
+ */
+ if (di->ac.charger_online) {
+ di->ac.wd_expired = true;
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+ }
+ if (di->usb.charger_online) {
+ di->usb.wd_expired = true;
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_vbusovv_handler() - VBUS overvoltage detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbusovv_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS overvoltage detected\n");
+ di->flags.vbus_ovv = true;
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+
+ /* Schedule a new HW failure check */
+ queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_ac_get_property() - get the ac/mains properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the ac/mains
+ * properties by reading the sysfs files.
+ * AC/Mains properties are online, present and voltage.
+ * online: ac/mains charging is in progress or not
+ * present: presence of the ac/mains
+ * voltage: AC/Mains voltage
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_charger *di;
+
+ di = to_ab8500_charger_ac_device_info(psy_to_ux500_charger(psy));
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->flags.mainextchnotok)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else if (di->ac.wd_expired || di->usb.wd_expired)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (di->flags.main_thermal_prot)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = di->ac.charger_online;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = di->ac.charger_connected;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ di->ac.charger_voltage = ab8500_charger_get_ac_voltage(di);
+ val->intval = di->ac.charger_voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ /*
+ * This property is used to indicate when CV mode is entered
+ * for the AC charger
+ */
+ di->ac.cv_active = ab8500_charger_ac_cv(di);
+ val->intval = di->ac.cv_active;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = ab8500_charger_get_ac_current(di) * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ab8500_charger_usb_get_property() - get the usb properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the usb
+ * properties by reading the sysfs files.
+ * USB properties are online, present and voltage.
+ * online: usb charging is in progress or not
+ * present: presence of the usb
+ * voltage: vbus voltage
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_charger *di;
+
+ di = to_ab8500_charger_usb_device_info(psy_to_ux500_charger(psy));
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->flags.usbchargernotok)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else if (di->ac.wd_expired || di->usb.wd_expired)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (di->flags.usb_thermal_prot)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (di->flags.vbus_ovv)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = di->usb.charger_online;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = di->usb.charger_connected;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ di->usb.charger_voltage = ab8500_charger_get_vbus_voltage(di);
+ val->intval = di->usb.charger_voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ /*
+ * This property is used to indicate when CV mode is entered
+ * for the USB charger
+ */
+ di->usb.cv_active = ab8500_charger_usb_cv(di);
+ val->intval = di->usb.cv_active;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = ab8500_charger_get_usb_current(di) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ /*
+ * This property is used to indicate when VBUS has collapsed
+ * due to too high output current from the USB charger
+ */
+ if (di->flags.vbus_collapse)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ab8500_charger_init_hw_registers() - Set up charger related registers
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Set up charger OVV, watchdog and maximum voltage registers as well as
+ * charging of the backup battery
+ */
+static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
+{
+ int ret = 0;
+
+ /* Setup maximum charger current and voltage for ABB cut2.0 */
+ if (!is_ab8500_1p1_or_earlier(di->parent)) {
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_VOLT_LVL_MAX_REG, CH_VOL_LVL_4P6);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to set CH_VOLT_LVL_MAX_REG\n");
+ goto out;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_MAX_REG, CH_OP_CUR_LVL_1P6);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to set CH_OPT_CRNTLVL_MAX_REG\n");
+ goto out;
+ }
+ }
+
+ /* VBUS OVV set to 6.3V and enable automatic current limitation */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_USBCH_CTRL2_REG,
+ VBUS_OVV_SELECT_6P3V | VBUS_AUTO_IN_CURR_LIM_ENA);
+ if (ret) {
+ dev_err(di->dev, "failed to set VBUS OVV\n");
+ goto out;
+ }
+
+ /* Enable main watchdog in OTP */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_OTP_EMUL, AB8500_OTP_CONF_15, OTP_ENABLE_WD);
+ if (ret) {
+ dev_err(di->dev, "failed to enable main WD in OTP\n");
+ goto out;
+ }
+
+ /* Enable main watchdog */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_SYS_CTRL2_BLOCK,
+ AB8500_MAIN_WDOG_CTRL_REG, MAIN_WDOG_ENA);
+ if (ret) {
+ dev_err(di->dev, "failed to enable main watchdog\n");
+ goto out;
+ }
+
+ /*
+ * Due to internal synchronisation, Enable and Kick watchdog bits
+ * cannot be enabled in a single write.
+ * A minimum delay of two 32 kHz clock periods (62.5 µs) must be
+ * inserted between writing the Enable and the Kick bits.
+ */
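+ /*
+ * For reference (illustrative arithmetic): one 32 kHz clock period is
+ * 1 / 32000 Hz = 31.25 us, so two periods are 62.5 us, which is why
+ * the udelay() below waits for 63 us.
+ */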
+ udelay(63);
+
+ /* Kick main watchdog */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_SYS_CTRL2_BLOCK,
+ AB8500_MAIN_WDOG_CTRL_REG,
+ (MAIN_WDOG_ENA | MAIN_WDOG_KICK));
+ if (ret) {
+ dev_err(di->dev, "failed to kick main watchdog\n");
+ goto out;
+ }
+
+ /* Disable main watchdog */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_SYS_CTRL2_BLOCK,
+ AB8500_MAIN_WDOG_CTRL_REG, MAIN_WDOG_DIS);
+ if (ret) {
+ dev_err(di->dev, "failed to disable main watchdog\n");
+ goto out;
+ }
+
+ /* Set watchdog timeout */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_WD_TIMER_REG, WD_TIMER);
+ if (ret) {
+ dev_err(di->dev, "failed to set charger watchdog timeout\n");
+ goto out;
+ }
+
+ /* Backup battery voltage and current */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_RTC,
+ AB8500_RTC_BACKUP_CHG_REG,
+ di->bat->bkup_bat_v |
+ di->bat->bkup_bat_i);
+ if (ret) {
+ dev_err(di->dev, "failed to setup backup battery charging\n");
+ goto out;
+ }
+
+ /* Enable backup battery charging */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_RTC, AB8500_RTC_CTRL_REG,
+ RTC_BUP_CH_ENA, RTC_BUP_CH_ENA);
+ if (ret < 0)
+ dev_err(di->dev, "%s mask and set failed\n", __func__);
+
+out:
+ return ret;
+}
+
+/*
+ * ab8500 charger driver interrupts and their respective isr
+ */
+static struct ab8500_charger_interrupts ab8500_charger_irq[] = {
+ {"MAIN_CH_UNPLUG_DET", ab8500_charger_mainchunplugdet_handler},
+ {"MAIN_CHARGE_PLUG_DET", ab8500_charger_mainchplugdet_handler},
+ {"MAIN_EXT_CH_NOT_OK", ab8500_charger_mainextchnotok_handler},
+ {"MAIN_CH_TH_PROT_R", ab8500_charger_mainchthprotr_handler},
+ {"MAIN_CH_TH_PROT_F", ab8500_charger_mainchthprotf_handler},
+ {"VBUS_DET_F", ab8500_charger_vbusdetf_handler},
+ {"VBUS_DET_R", ab8500_charger_vbusdetr_handler},
+ {"USB_LINK_STATUS", ab8500_charger_usblinkstatus_handler},
+ {"USB_CH_TH_PROT_R", ab8500_charger_usbchthprotr_handler},
+ {"USB_CH_TH_PROT_F", ab8500_charger_usbchthprotf_handler},
+ {"USB_CHARGER_NOT_OKR", ab8500_charger_usbchargernotokr_handler},
+ {"VBUS_OVV", ab8500_charger_vbusovv_handler},
+ {"CH_WD_EXP", ab8500_charger_chwdexp_handler},
+};
+
+static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *power)
+{
+ struct ab8500_charger *di =
+ container_of(nb, struct ab8500_charger, nb);
+ enum ab8500_usb_state bm_usb_state;
+ unsigned mA = *((unsigned *)power);
+
+ if (event != USB_EVENT_VBUS) {
+ dev_dbg(di->dev, "not a standard host, returning\n");
+ return NOTIFY_DONE;
+ }
+
+ /* TODO: State is fabricated here. See if charger really needs USB
+ * state or if mA is enough
+ */
+ if ((di->usb_state.usb_current == 2) && (mA > 2))
+ bm_usb_state = AB8500_BM_USB_STATE_RESUME;
+ else if (mA == 0)
+ bm_usb_state = AB8500_BM_USB_STATE_RESET_HS;
+ else if (mA == 2)
+ bm_usb_state = AB8500_BM_USB_STATE_SUSPEND;
+ else if (mA >= 8) /* 8, 100, 500 */
+ bm_usb_state = AB8500_BM_USB_STATE_CONFIGURED;
+ else /* Should never occur */
+ bm_usb_state = AB8500_BM_USB_STATE_RESET_FS;
+
+ dev_dbg(di->dev, "%s usb_state: 0x%02x mA: %d\n",
+ __func__, bm_usb_state, mA);
+
+ spin_lock(&di->usb_state.usb_lock);
+ di->usb_state.usb_changed = true;
+ spin_unlock(&di->usb_state.usb_lock);
+
+ di->usb_state.state = bm_usb_state;
+ di->usb_state.usb_current = mA;
+
+ queue_work(di->charger_wq, &di->usb_state_changed_work);
+
+ return NOTIFY_OK;
+}
+
+#if defined(CONFIG_PM)
+static int ab8500_charger_resume(struct platform_device *pdev)
+{
+ int ret;
+ struct ab8500_charger *di = platform_get_drvdata(pdev);
+
+ /*
+ * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
+ * logic. That means we have to continuously kick the charger
+ * watchdog even when no charger is connected. This is only
+ * valid once the AC charger has been enabled. This is
+ * a bug that is not handled by the algorithm and the
+ * watchdog has to be kicked by the charger driver
+ * when the AC charger is disabled
+ */
+ if (di->ac_conn && is_ab8500_1p1_or_earlier(di->parent)) {
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
+ if (ret)
+ dev_err(di->dev, "Failed to kick WD!\n");
+
+ /* If not already pending start a new timer */
+ if (!delayed_work_pending(
+ &di->kick_wd_work)) {
+ queue_delayed_work(di->charger_wq, &di->kick_wd_work,
+ round_jiffies(WD_KICK_INTERVAL));
+ }
+ }
+
+ /* If we still have a HW failure, schedule a new check */
+ if (di->flags.mainextchnotok || di->flags.vbus_ovv) {
+ queue_delayed_work(di->charger_wq,
+ &di->check_hw_failure_work, 0);
+ }
+
+ return 0;
+}
+
+static int ab8500_charger_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab8500_charger *di = platform_get_drvdata(pdev);
+
+ /* Cancel any pending HW failure check */
+ if (delayed_work_pending(&di->check_hw_failure_work))
+ cancel_delayed_work(&di->check_hw_failure_work);
+
+ return 0;
+}
+#else
+#define ab8500_charger_suspend NULL
+#define ab8500_charger_resume NULL
+#endif
+
+static int __devexit ab8500_charger_remove(struct platform_device *pdev)
+{
+ struct ab8500_charger *di = platform_get_drvdata(pdev);
+ int i, irq, ret;
+
+ /* Disable AC charging */
+ ab8500_charger_ac_en(&di->ac_chg, false, 0, 0);
+
+ /* Disable USB charging */
+ ab8500_charger_usb_en(&di->usb_chg, false, 0, 0);
+
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ free_irq(irq, di);
+ }
+
+ /* disable the regulator */
+ regulator_put(di->regu);
+
+ /* Backup battery voltage and current disable */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_RTC, AB8500_RTC_CTRL_REG, RTC_BUP_CH_ENA, 0);
+ if (ret < 0)
+ dev_err(di->dev, "%s mask and set failed\n", __func__);
+
+ usb_unregister_notifier(di->usb_phy, &di->nb);
+ usb_put_transceiver(di->usb_phy);
+
+ /* Delete the work queue */
+ destroy_workqueue(di->charger_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->usb_chg.psy);
+ power_supply_unregister(&di->ac_chg.psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab8500_charger_probe(struct platform_device *pdev)
+{
+ int irq, i, charger_status, ret = 0;
+ struct abx500_bm_plat_data *plat_data;
+
+ struct ab8500_charger *di =
+ kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ /* initialize lock */
+ spin_lock_init(&di->usb_state.usb_lock);
+
+ /* get charger specific platform data */
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->charger;
+
+ if (!di->pdata) {
+ dev_err(di->dev, "no charger platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* get battery specific platform data */
+ di->bat = plat_data->battery;
+ if (!di->bat) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ di->autopower = false;
+
+ /* AC supply */
+ /* power_supply base class */
+ di->ac_chg.psy.name = "ab8500_ac";
+ di->ac_chg.psy.type = POWER_SUPPLY_TYPE_MAINS;
+ di->ac_chg.psy.properties = ab8500_charger_ac_props;
+ di->ac_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_ac_props);
+ di->ac_chg.psy.get_property = ab8500_charger_ac_get_property;
+ di->ac_chg.psy.supplied_to = di->pdata->supplied_to;
+ di->ac_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ /* ux500_charger sub-class */
+ di->ac_chg.ops.enable = &ab8500_charger_ac_en;
+ di->ac_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
+ di->ac_chg.ops.update_curr = &ab8500_charger_update_charger_current;
+ di->ac_chg.max_out_volt = ab8500_charger_voltage_map[
+ ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
+ di->ac_chg.max_out_curr = ab8500_charger_current_map[
+ ARRAY_SIZE(ab8500_charger_current_map) - 1];
+
+ /* USB supply */
+ /* power_supply base class */
+ di->usb_chg.psy.name = "ab8500_usb";
+ di->usb_chg.psy.type = POWER_SUPPLY_TYPE_USB;
+ di->usb_chg.psy.properties = ab8500_charger_usb_props;
+ di->usb_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_usb_props);
+ di->usb_chg.psy.get_property = ab8500_charger_usb_get_property;
+ di->usb_chg.psy.supplied_to = di->pdata->supplied_to;
+ di->usb_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ /* ux500_charger sub-class */
+ di->usb_chg.ops.enable = &ab8500_charger_usb_en;
+ di->usb_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
+ di->usb_chg.ops.update_curr = &ab8500_charger_update_charger_current;
+ di->usb_chg.max_out_volt = ab8500_charger_voltage_map[
+ ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
+ di->usb_chg.max_out_curr = ab8500_charger_current_map[
+ ARRAY_SIZE(ab8500_charger_current_map) - 1];
+
+
+ /* Create a work queue for the charger */
+ di->charger_wq =
+ create_singlethread_workqueue("ab8500_charger_wq");
+ if (di->charger_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for HW failure check */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
+ ab8500_charger_check_hw_failure_work);
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
+ ab8500_charger_check_usbchargernotok_work);
+
+ /*
+ * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
+ * logic. That means we have to continuously kick the charger
+ * watchdog even when no charger is connected. This is only
+ * valid once the AC charger has been enabled. This is
+ * a bug that is not handled by the algorithm and the
+ * watchdog has to be kicked by the charger driver
+ * when the AC charger is disabled
+ */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->kick_wd_work,
+ ab8500_charger_kick_watchdog_work);
+
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_vbat_work,
+ ab8500_charger_check_vbat_work);
+
+ /* Init work for charger detection */
+ INIT_WORK(&di->usb_link_status_work,
+ ab8500_charger_usb_link_status_work);
+ INIT_WORK(&di->ac_work, ab8500_charger_ac_work);
+ INIT_WORK(&di->detect_usb_type_work,
+ ab8500_charger_detect_usb_type_work);
+
+ INIT_WORK(&di->usb_state_changed_work,
+ ab8500_charger_usb_state_changed_work);
+
+ /* Init work for checking HW status */
+ INIT_WORK(&di->check_main_thermal_prot_work,
+ ab8500_charger_check_main_thermal_prot_work);
+ INIT_WORK(&di->check_usb_thermal_prot_work,
+ ab8500_charger_check_usb_thermal_prot_work);
+
+ /*
+ * VDD ADC supply needs to be enabled from this driver when there
+ * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
+ * interrupts during charging
+ */
+ di->regu = regulator_get(di->dev, "vddadc");
+ if (IS_ERR(di->regu)) {
+ ret = PTR_ERR(di->regu);
+ dev_err(di->dev, "failed to get vddadc regulator\n");
+ goto free_charger_wq;
+ }
+
+
+ /* Initialize OVV, and other registers */
+ ret = ab8500_charger_init_hw_registers(di);
+ if (ret) {
+ dev_err(di->dev, "failed to initialize ABB registers\n");
+ goto free_regulator;
+ }
+
+ /* Register AC charger class */
+ ret = power_supply_register(di->dev, &di->ac_chg.psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register AC charger\n");
+ goto free_regulator;
+ }
+
+ /* Register USB charger class */
+ ret = power_supply_register(di->dev, &di->usb_chg.psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register USB charger\n");
+ goto free_ac;
+ }
+
+ di->usb_phy = usb_get_transceiver();
+ if (!di->usb_phy) {
+ dev_err(di->dev, "failed to get usb transceiver\n");
+ ret = -EINVAL;
+ goto free_usb;
+ }
+ di->nb.notifier_call = ab8500_charger_usb_notifier_call;
+ ret = usb_register_notifier(di->usb_phy, &di->nb);
+ if (ret) {
+ dev_err(di->dev, "failed to register usb notifier\n");
+ goto put_usb_phy;
+ }
+
+ /* Identify the connected charger types during startup */
+ charger_status = ab8500_charger_detect_chargers(di);
+ if (charger_status & AC_PW_CONN) {
+ di->ac.charger_connected = 1;
+ di->ac_conn = true;
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+ sysfs_notify(&di->ac_chg.psy.dev->kobj, NULL, "present");
+ }
+
+ if (charger_status & USB_PW_CONN) {
+ dev_dbg(di->dev, "VBUS Detect during startup\n");
+ di->vbus_detected = true;
+ di->vbus_detected_start = true;
+ queue_work(di->charger_wq,
+ &di->detect_usb_type_work);
+ }
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab8500_charger_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab8500_charger_irq[i].name, di);
+
+ if (ret != 0) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ , ab8500_charger_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab8500_charger_irq[i].name, irq, ret);
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ return ret;
+
+free_irq:
+ usb_unregister_notifier(di->usb_phy, &di->nb);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ free_irq(irq, di);
+ }
+put_usb_phy:
+ usb_put_transceiver(di->usb_phy);
+free_usb:
+ power_supply_unregister(&di->usb_chg.psy);
+free_ac:
+ power_supply_unregister(&di->ac_chg.psy);
+free_regulator:
+ regulator_put(di->regu);
+free_charger_wq:
+ destroy_workqueue(di->charger_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab8500_charger_driver = {
+ .probe = ab8500_charger_probe,
+ .remove = __devexit_p(ab8500_charger_remove),
+ .suspend = ab8500_charger_suspend,
+ .resume = ab8500_charger_resume,
+ .driver = {
+ .name = "ab8500-charger",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_charger_init(void)
+{
+ return platform_driver_register(&ab8500_charger_driver);
+}
+
+static void __exit ab8500_charger_exit(void)
+{
+ platform_driver_unregister(&ab8500_charger_driver);
+}
+
+subsys_initcall_sync(ab8500_charger_init);
+module_exit(ab8500_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski, Arun R Murthy");
+MODULE_ALIAS("platform:ab8500-charger");
+MODULE_DESCRIPTION("AB8500 charger management driver");
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
new file mode 100644
index 000000000000..c22f2f05657e
--- /dev/null
+++ b/drivers/power/ab8500_fg.c
@@ -0,0 +1,2637 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2012
+ *
+ * Main and Back-up battery management driver.
+ *
+ * Note: Backup battery management is required when the backup battery is a
+ * Li-Ion cell and not when it is capacitive. HREF boards have a capacitive
+ * backup battery, so backup battery management is not used, although the
+ * supporting code is available in this driver.
+ *
+ * License Terms: GNU General Public License v2
+ * Author:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ * Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/slab.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+#include <linux/delay.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/mfd/abx500.h>
+#include <linux/time.h>
+#include <linux/completion.h>
+
+#define MILLI_TO_MICRO 1000
+#define FG_LSB_IN_MA 1627
+#define QLSB_NANO_AMP_HOURS_X10 1129
+#define INS_CURR_TIMEOUT (3 * HZ)
+
+#define SEC_TO_SAMPLE(S) (S * 4)
+
+#define NBR_AVG_SAMPLES 20
+
+#define LOW_BAT_CHECK_INTERVAL (2 * HZ)
+
+#define VALID_CAPACITY_SEC (45 * 60) /* 45 minutes */
+#define BATT_OK_MIN 2360 /* mV */
+#define BATT_OK_INCREMENT 50 /* mV */
+#define BATT_OK_MAX_NR_INCREMENTS 0xE
+
+/* FG constants */
+#define BATT_OVV 0x01
+
+#define interpolate(x, x1, y1, x2, y2) \
+ ((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1))))
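+/*
+ * Worked example with illustrative numbers: interpolating between the
+ * points (x1, y1) = (3600, 500) and (x2, y2) = (3800, 700) at x = 3700
+ * gives interpolate(3700, 3600, 500, 3800, 700)
+ * = 500 + ((700 - 500) * (3700 - 3600)) / (3800 - 3600) = 600.
+ */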
+
+#define to_ab8500_fg_device_info(x) container_of((x), \
+ struct ab8500_fg, fg_psy)
+
+/**
+ * struct ab8500_fg_interrupts - ab8500 fg interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab8500_fg_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+enum ab8500_fg_discharge_state {
+ AB8500_FG_DISCHARGE_INIT,
+ AB8500_FG_DISCHARGE_INITMEASURING,
+ AB8500_FG_DISCHARGE_INIT_RECOVERY,
+ AB8500_FG_DISCHARGE_RECOVERY,
+ AB8500_FG_DISCHARGE_READOUT_INIT,
+ AB8500_FG_DISCHARGE_READOUT,
+ AB8500_FG_DISCHARGE_WAKEUP,
+};
+
+static char *discharge_state[] = {
+ "DISCHARGE_INIT",
+ "DISCHARGE_INITMEASURING",
+ "DISCHARGE_INIT_RECOVERY",
+ "DISCHARGE_RECOVERY",
+ "DISCHARGE_READOUT_INIT",
+ "DISCHARGE_READOUT",
+ "DISCHARGE_WAKEUP",
+};
+
+enum ab8500_fg_charge_state {
+ AB8500_FG_CHARGE_INIT,
+ AB8500_FG_CHARGE_READOUT,
+};
+
+static char *charge_state[] = {
+ "CHARGE_INIT",
+ "CHARGE_READOUT",
+};
+
+enum ab8500_fg_calibration_state {
+ AB8500_FG_CALIB_INIT,
+ AB8500_FG_CALIB_WAIT,
+ AB8500_FG_CALIB_END,
+};
+
+struct ab8500_fg_avg_cap {
+ int avg;
+ int samples[NBR_AVG_SAMPLES];
+ __kernel_time_t time_stamps[NBR_AVG_SAMPLES];
+ int pos;
+ int nbr_samples;
+ int sum;
+};
+
+struct ab8500_fg_battery_capacity {
+ int max_mah_design;
+ int max_mah;
+ int mah;
+ int permille;
+ int level;
+ int prev_mah;
+ int prev_percent;
+ int prev_level;
+ int user_mah;
+};
+
+struct ab8500_fg_flags {
+ bool fg_enabled;
+ bool conv_done;
+ bool charging;
+ bool fully_charged;
+ bool force_full;
+ bool low_bat_delay;
+ bool low_bat;
+ bool bat_ovv;
+ bool batt_unknown;
+ bool calibrate;
+ bool user_cap;
+ bool batt_id_received;
+};
+
+struct inst_curr_result_list {
+ struct list_head list;
+ int *result;
+};
+
+/**
+ * struct ab8500_fg - ab8500 FG device information
+ * @dev: Pointer to the structure device
+ * @node: a list of AB8500 FGs, hence prepared for reentrance
+ * @irq: holds the CCEOC interrupt number
+ * @vbat: Battery voltage in mV
+ * @vbat_nom: Nominal battery voltage in mV
+ * @inst_curr: Instantaneous battery current in mA
+ * @avg_curr: Average battery current in mA
+ * @bat_temp: battery temperature
+ * @fg_samples: Number of samples used in the FG accumulation
+ * @accu_charge: Accumulated charge from the last conversion
+ * @recovery_cnt: Counter for recovery mode
+ * @high_curr_cnt: Counter for high current mode
+ * @init_cnt: Counter for init mode
+ * @recovery_needed: Indicate if recovery is needed
+ * @high_curr_mode: Indicate if we're in high current mode
+ * @init_capacity: Indicate if initial capacity measuring should be done
+ * @turn_off_fg: True if fg was off before current measurement
+ * @calib_state: State during offset calibration
+ * @discharge_state: Current discharge state
+ * @charge_state: Current charge state
+ * @ab8500_fg_complete: Completion struct used for the instant current reading
+ * @flags: Structure for information about events triggered
+ * @bat_cap: Structure for battery capacity specific parameters
+ * @avg_cap: Average capacity filter
+ * @parent: Pointer to the struct ab8500
+ * @gpadc: Pointer to the struct gpadc
+ * @pdata: Pointer to the abx500_fg platform data
+ * @bat: Pointer to the abx500_bm platform data
+ * @fg_psy: Structure that holds the FG specific battery properties
+ * @fg_wq: Work queue for running the FG algorithm
+ * @fg_periodic_work: Work to run the FG algorithm periodically
+ * @fg_low_bat_work: Work to check low bat condition
+ * @fg_reinit_work: Work used to reset and reinitialise the FG algorithm
+ * @fg_work: Work to run the FG algorithm instantly
+ * @fg_acc_cur_work: Work to read the FG accumulator
+ * @fg_check_hw_failure_work: Work for checking HW state
+ * @cc_lock: Mutex for locking the CC
+ * @fg_kobject: Structure of type kobject
+ */
+struct ab8500_fg {
+ struct device *dev;
+ struct list_head node;
+ int irq;
+ int vbat;
+ int vbat_nom;
+ int inst_curr;
+ int avg_curr;
+ int bat_temp;
+ int fg_samples;
+ int accu_charge;
+ int recovery_cnt;
+ int high_curr_cnt;
+ int init_cnt;
+ bool recovery_needed;
+ bool high_curr_mode;
+ bool init_capacity;
+ bool turn_off_fg;
+ enum ab8500_fg_calibration_state calib_state;
+ enum ab8500_fg_discharge_state discharge_state;
+ enum ab8500_fg_charge_state charge_state;
+ struct completion ab8500_fg_complete;
+ struct ab8500_fg_flags flags;
+ struct ab8500_fg_battery_capacity bat_cap;
+ struct ab8500_fg_avg_cap avg_cap;
+ struct ab8500 *parent;
+ struct ab8500_gpadc *gpadc;
+ struct abx500_fg_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct power_supply fg_psy;
+ struct workqueue_struct *fg_wq;
+ struct delayed_work fg_periodic_work;
+ struct delayed_work fg_low_bat_work;
+ struct delayed_work fg_reinit_work;
+ struct work_struct fg_work;
+ struct work_struct fg_acc_cur_work;
+ struct delayed_work fg_check_hw_failure_work;
+ struct mutex cc_lock;
+ struct kobject fg_kobject;
+};
+static LIST_HEAD(ab8500_fg_list);
+
+/**
+ * ab8500_fg_get() - returns a reference to the primary AB8500 fuel gauge
+ * (i.e. the first fuel gauge in the instance list)
+ */
+struct ab8500_fg *ab8500_fg_get(void)
+{
+ struct ab8500_fg *fg;
+
+ if (list_empty(&ab8500_fg_list))
+ return NULL;
+
+ fg = list_first_entry(&ab8500_fg_list, struct ab8500_fg, node);
+ return fg;
+}
+
+/* Main battery properties */
+static enum power_supply_property ab8500_fg_props[] = {
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+};
+
+/*
+ * This array maps the raw hex value to lowbat voltage used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_fg_lowbat_voltage_map[] = {
+ 2300, 2325, 2350, 2375, 2400, 2425, 2450, 2475,
+ 2500, 2525, 2550, 2575, 2600, 2625, 2650, 2675,
+ 2700, 2725, 2750, 2775, 2800, 2825, 2850, 2875,
+ 2900, 2925, 2950, 2975, 3000, 3025, 3050, 3075,
+ 3100, 3125, 3150, 3175, 3200, 3225, 3250, 3275,
+ 3300, 3325, 3350, 3375, 3400, 3425, 3450, 3475,
+ 3500, 3525, 3550, 3575, 3600, 3625, 3650, 3675,
+ 3700, 3725, 3750, 3775, 3800, 3825, 3850, 3850,
+};
+
+static u8 ab8500_volt_to_regval(int voltage)
+{
+ int i;
+
+ if (voltage < ab8500_fg_lowbat_voltage_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_fg_lowbat_voltage_map); i++) {
+ if (voltage < ab8500_fg_lowbat_voltage_map[i])
+ return (u8) i - 1;
+ }
+
+ /* If not captured above, return index of last element */
+ return (u8) ARRAY_SIZE(ab8500_fg_lowbat_voltage_map) - 1;
+}
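+
+/*
+ * Example, derived from the table above: the entries step by 25 mV from
+ * 2300 mV, so ab8500_volt_to_regval(3100) returns 32 (0x20), and any
+ * voltage at or above 3850 mV maps to the last index, 63.
+ */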
+
+/**
+ * ab8500_fg_is_low_curr() - Low or high current mode
+ * @di: pointer to the ab8500_fg structure
+ * @curr: the current to base our decision on
+ *
+ * Low current mode if the current consumption is below a certain threshold
+ */
+static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
+{
+ /*
+ * We want to know if we're in low current mode
+ */
+ if (curr > -di->bat->fg_params->high_curr_threshold)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ab8500_fg_add_cap_sample() - Add capacity to average filter
+ * @di: pointer to the ab8500_fg structure
+ * @sample: the capacity in mAh to add to the filter
+ *
+ * A capacity is added to the filter and a new mean capacity is calculated and
+ * returned
+ */
+static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
+{
+ struct timespec ts;
+ struct ab8500_fg_avg_cap *avg = &di->avg_cap;
+
+ getnstimeofday(&ts);
+
+ do {
+ avg->sum += sample - avg->samples[avg->pos];
+ avg->samples[avg->pos] = sample;
+ avg->time_stamps[avg->pos] = ts.tv_sec;
+ avg->pos++;
+
+ if (avg->pos == NBR_AVG_SAMPLES)
+ avg->pos = 0;
+
+ if (avg->nbr_samples < NBR_AVG_SAMPLES)
+ avg->nbr_samples++;
+
+ /*
+ * Check the time stamp for each sample. If too old,
+ * replace with latest sample
+ */
+ } while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
+
+ avg->avg = avg->sum / avg->nbr_samples;
+
+ return avg->avg;
+}
+
+/**
+ * ab8500_fg_clear_cap_samples() - Clear average filter
+ * @di: pointer to the ab8500_fg structure
+ *
+ * The capacity filter is reset to zero.
+ */
+static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
+{
+ int i;
+ struct ab8500_fg_avg_cap *avg = &di->avg_cap;
+
+ avg->pos = 0;
+ avg->nbr_samples = 0;
+ avg->sum = 0;
+ avg->avg = 0;
+
+ for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+ avg->samples[i] = 0;
+ avg->time_stamps[i] = 0;
+ }
+}
+
+/**
+ * ab8500_fg_fill_cap_sample() - Fill average filter
+ * @di: pointer to the ab8500_fg structure
+ * @sample: the capacity in mAh to fill the filter with
+ *
+ * The capacity filter is filled with a capacity in mAh
+ */
+static void ab8500_fg_fill_cap_sample(struct ab8500_fg *di, int sample)
+{
+ int i;
+ struct timespec ts;
+ struct ab8500_fg_avg_cap *avg = &di->avg_cap;
+
+ getnstimeofday(&ts);
+
+ for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+ avg->samples[i] = sample;
+ avg->time_stamps[i] = ts.tv_sec;
+ }
+
+ avg->pos = 0;
+ avg->nbr_samples = NBR_AVG_SAMPLES;
+ avg->sum = sample * NBR_AVG_SAMPLES;
+ avg->avg = sample;
+}
+
+/**
+ * ab8500_fg_coulomb_counter() - enable coulomb counter
+ * @di: pointer to the ab8500_fg structure
+ * @enable: enable/disable
+ *
+ * Enable/Disable coulomb counter.
+ * On failure returns negative value.
+ */
+static int ab8500_fg_coulomb_counter(struct ab8500_fg *di, bool enable)
+{
+ int ret = 0;
+ mutex_lock(&di->cc_lock);
+ if (enable) {
+ /* To be able to reprogram the number of samples, we have to
+ * first stop the CC and then enable it again */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG, 0x00);
+ if (ret)
+ goto cc_err;
+
+ /* Program the samples */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU,
+ di->fg_samples);
+ if (ret)
+ goto cc_err;
+
+ /* Start the CC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG,
+ (CC_DEEP_SLEEP_ENA | CC_PWR_UP_ENA));
+ if (ret)
+ goto cc_err;
+
+ di->flags.fg_enabled = true;
+ } else {
+ /* Clear any pending read requests */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
+ if (ret)
+ goto cc_err;
+
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU_CTRL, 0);
+ if (ret)
+ goto cc_err;
+
+ /* Stop the CC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG, 0);
+ if (ret)
+ goto cc_err;
+
+ di->flags.fg_enabled = false;
+
+ }
+ dev_dbg(di->dev, " CC enabled: %d Samples: %d\n",
+ enable, di->fg_samples);
+
+ mutex_unlock(&di->cc_lock);
+
+ return ret;
+cc_err:
+ dev_err(di->dev, "%s Enabling coulomb counter failed\n", __func__);
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
+
+/**
+ * ab8500_fg_inst_curr_start() - start battery instantaneous current
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns 0 or error code
+ * Note: This is part "one" and has to be called before
+ * ab8500_fg_inst_curr_finalize()
+ */
+int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
+{
+ u8 reg_val;
+ int ret;
+
+ mutex_lock(&di->cc_lock);
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG, &reg_val);
+ if (ret < 0)
+ goto fail;
+
+ if (!(reg_val & CC_PWR_UP_ENA)) {
+ dev_dbg(di->dev, "%s Enable FG\n", __func__);
+ di->turn_off_fg = true;
+
+ /* Program the samples */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU,
+ SEC_TO_SAMPLE(10));
+ if (ret)
+ goto fail;
+
+ /* Start the CC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG,
+ (CC_DEEP_SLEEP_ENA | CC_PWR_UP_ENA));
+ if (ret)
+ goto fail;
+ } else {
+ di->turn_off_fg = false;
+ }
+
+ /* Return and WFI */
+ INIT_COMPLETION(di->ab8500_fg_complete);
+ enable_irq(di->irq);
+
+ /* Note: cc_lock is still locked */
+ return 0;
+fail:
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
+
+/**
+ * ab8500_fg_inst_curr_done() - check if fg conversion is done
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns 1 if conversion done, 0 if still waiting
+ */
+int ab8500_fg_inst_curr_done(struct ab8500_fg *di)
+{
+ return completion_done(&di->ab8500_fg_complete);
+}
+
+/**
+ * ab8500_fg_inst_curr_finalize() - battery instantaneous current
+ * @di: pointer to the ab8500_fg structure
+ * @res: battery instantaneous current (on success)
+ *
+ * Returns 0 or an error code
+ * Note: This is part "two" and has to be called at the earliest 250 ms
+ * after ab8500_fg_inst_curr_start()
+ */
+int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
+{
+ u8 low, high;
+ int val;
+ int ret;
+ int timeout;
+
+ if (!completion_done(&di->ab8500_fg_complete)) {
+ timeout = wait_for_completion_timeout(&di->ab8500_fg_complete,
+ INS_CURR_TIMEOUT);
+ dev_dbg(di->dev, "Finalize time: %d ms\n",
+ ((INS_CURR_TIMEOUT - timeout) * 1000) / HZ);
+ if (!timeout) {
+ ret = -ETIME;
+ disable_irq(di->irq);
+ dev_err(di->dev, "completion timed out [%d]\n",
+ __LINE__);
+ goto fail;
+ }
+ }
+
+ disable_irq(di->irq);
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+ READ_REQ, READ_REQ);
+
+ /* 100uS between read request and read is needed */
+ usleep_range(100, 100);
+
+ /* Read CC Sample conversion value Low and high */
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_SMPL_CNVL_REG, &low);
+ if (ret < 0)
+ goto fail;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_SMPL_CNVH_REG, &high);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * A negative value means discharging.
+ * Sign-extend the two's complement sample into a signed integer.
+ */
+ if (high & 0x10)
+ val = (low | (high << 8) | 0xFFFFE000);
+ else
+ val = (low | (high << 8));
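+
+ /*
+ * Illustrative example: low = 0x00, high = 0x1F has the sign bit
+ * (0x10) set, so val = 0x1F00 | 0xFFFFE000 = 0xFFFFFF00 = -256,
+ * while low = 0x00, high = 0x0F gives val = 0x0F00 = 3840.
+ */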
+
+ /*
+ * Convert to unit value in mA
+ * Full scale input voltage is
+ * 66.660mV => LSB = 66.660mV/(4096*res) = 1.627mA
+ * Given a 250ms conversion cycle time the LSB corresponds
+ * to 112.9 nAh. Convert to current by dividing by the conversion
+ * time in hours (250ms = 1 / (3600 * 4)h)
+ * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
+ */
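+ /*
+ * Illustrative example, assuming fg_res = 100 (i.e. 10 mOhm): a raw
+ * sample of 1000 yields (1000 * 1129 * 36 * 4) / (1000 * 100) =
+ * 1625 mA with integer math, i.e. roughly the 1.627 mA per LSB
+ * stated above.
+ */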
+ val = (val * QLSB_NANO_AMP_HOURS_X10 * 36 * 4) /
+ (1000 * di->bat->fg_res);
+
+ if (di->turn_off_fg) {
+ dev_dbg(di->dev, "%s Disable FG\n", __func__);
+
+ /* Clear any pending read requests */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
+ if (ret)
+ goto fail;
+
+ /* Stop the CC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG, 0);
+ if (ret)
+ goto fail;
+ }
+ mutex_unlock(&di->cc_lock);
+ (*res) = val;
+
+ return 0;
+fail:
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
+
+/**
+ * ab8500_fg_inst_curr_blocking() - battery instantaneous current
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns the battery instantaneous current in mA, or 0 on error
+ */
+int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
+{
+ int ret;
+ int res = 0;
+
+ ret = ab8500_fg_inst_curr_start(di);
+ if (ret) {
+ dev_err(di->dev, "Failed to initialize fg_inst\n");
+ return 0;
+ }
+
+ ret = ab8500_fg_inst_curr_finalize(di, &res);
+ if (ret) {
+ dev_err(di->dev, "Failed to finalize fg_inst\n");
+ return 0;
+ }
+
+ return res;
+}
+
+/**
+ * ab8500_fg_acc_cur_work() - average battery current
+ * @work: pointer to the work_struct structure
+ *
+ * Updates the average battery current obtained from the
+ * coulomb counter.
+ */
+static void ab8500_fg_acc_cur_work(struct work_struct *work)
+{
+ int val;
+ int ret;
+ u8 low, med, high;
+
+ struct ab8500_fg *di = container_of(work,
+ struct ab8500_fg, fg_acc_cur_work);
+
+ mutex_lock(&di->cc_lock);
+ ret = abx500_set_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_NCOV_ACCU_CTRL, RD_NCONV_ACCU_REQ);
+ if (ret)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_NCOV_ACCU_LOW, &low);
+ if (ret < 0)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_NCOV_ACCU_MED, &med);
+ if (ret < 0)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_NCOV_ACCU_HIGH, &high);
+ if (ret < 0)
+ goto exit;
+
+ /* Check the sign bit; a negative value is in two's complement */
+ if (high & 0x10)
+ val = (low | (med << 8) | (high << 16) | 0xFFE00000);
+ else
+ val = (low | (med << 8) | (high << 16));
+
+ /*
+ * Convert to uAh
+ * Given a 250ms conversion cycle time the LSB corresponds
+ * to 112.9 nAh.
+ * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
+ */
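+ /*
+ * Illustrative example, assuming fg_res = 100 (10 mOhm): an
+ * accumulator value of 10000 gives (10000 * 1129) / (100 * 100) =
+ * 1129 uAh, i.e. 112.9 nAh per accumulator count as stated above.
+ */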
+ di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X10) /
+ (100 * di->bat->fg_res);
+
+ /*
+ * Convert to unit value in mA
+ * Full scale input voltage is
+ * 66.660mV => LSB = 66.660mV/(4096*res) = 1.627mA
+ * Given a 250ms conversion cycle time the LSB corresponds
+ * to 112.9 nAh. Convert to current by dividing by the conversion
+ * time in hours (= samples / (3600 * 4)h)
+ * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
+ */
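+ /*
+ * Illustrative example, assuming fg_res = 100 (10 mOhm) and
+ * fg_samples = 40 (a 10 s accumulation): a value of 10000 gives
+ * (10000 * 1129 * 36) / (1000 * 100 * 10) = 406 mA, i.e. the
+ * 1129 uAh above drained over 10 seconds.
+ */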
+ di->avg_curr = (val * QLSB_NANO_AMP_HOURS_X10 * 36) /
+ (1000 * di->bat->fg_res * (di->fg_samples / 4));
+
+ di->flags.conv_done = true;
+
+ mutex_unlock(&di->cc_lock);
+
+ queue_work(di->fg_wq, &di->fg_work);
+
+ return;
+exit:
+ dev_err(di->dev,
+ "Failed to read or write gas gauge registers\n");
+ mutex_unlock(&di->cc_lock);
+ queue_work(di->fg_wq, &di->fg_work);
+}
+
+/**
+ * ab8500_fg_bat_voltage() - get battery voltage
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns battery voltage(on success) else error code
+ */
+static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
+{
+ int vbat;
+ static int prev;
+
+ vbat = ab8500_gpadc_convert(di->gpadc, MAIN_BAT_V);
+ if (vbat < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed, using previous value\n",
+ __func__);
+ return prev;
+ }
+
+ prev = vbat;
+ return vbat;
+}
+
+/**
+ * ab8500_fg_volt_to_capacity() - Voltage based capacity
+ * @di: pointer to the ab8500_fg structure
+ * @voltage: The voltage to convert to a capacity
+ *
+ * Returns battery capacity in per mille based on voltage
+ */
+static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
+{
+ int i, tbl_size;
+ struct abx500_v_to_cap *tbl;
+ int cap = 0;
+
+ tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl;
+ tbl_size = di->bat->bat_type[di->bat->batt_id].n_v_cap_tbl_elements;
+
+ for (i = 0; i < tbl_size; ++i) {
+ if (voltage > tbl[i].voltage)
+ break;
+ }
+
+ if ((i > 0) && (i < tbl_size)) {
+ cap = interpolate(voltage,
+ tbl[i].voltage,
+ tbl[i].capacity * 10,
+ tbl[i-1].voltage,
+ tbl[i-1].capacity * 10);
+ } else if (i == 0) {
+ cap = 1000;
+ } else {
+ cap = 0;
+ }
+
+ dev_dbg(di->dev, "%s Vbat: %d, Cap: %d per mille\n",
+ __func__, voltage, cap);
+
+ return cap;
+}
+
+/**
+ * ab8500_fg_uncomp_volt_to_capacity() - Uncompensated voltage based capacity
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is not compensated
+ * for the voltage drop due to the load
+ */
+static int ab8500_fg_uncomp_volt_to_capacity(struct ab8500_fg *di)
+{
+ di->vbat = ab8500_fg_bat_voltage(di);
+ return ab8500_fg_volt_to_capacity(di, di->vbat);
+}
+
+/**
+ * ab8500_fg_battery_resistance() - Returns the battery inner resistance
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns battery inner resistance added with the fuel gauge resistor value
+ * to get the total resistance in the whole link from gnd to bat+ node.
+ */
+static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
+{
+ int i, tbl_size;
+ struct batres_vs_temp *tbl;
+ int resist = 0;
+
+ tbl = di->bat->bat_type[di->bat->batt_id].batres_tbl;
+ tbl_size = di->bat->bat_type[di->bat->batt_id].n_batres_tbl_elements;
+
+ for (i = 0; i < tbl_size; ++i) {
+ if (di->bat_temp / 10 > tbl[i].temp)
+ break;
+ }
+
+ if ((i > 0) && (i < tbl_size)) {
+ resist = interpolate(di->bat_temp / 10,
+ tbl[i].temp,
+ tbl[i].resist,
+ tbl[i-1].temp,
+ tbl[i-1].resist);
+ } else if (i == 0) {
+ resist = tbl[0].resist;
+ } else {
+ resist = tbl[tbl_size - 1].resist;
+ }
+
+ dev_dbg(di->dev, "%s Temp: %d battery internal resistance: %d"
+ " fg resistance %d, total: %d (mOhm)\n",
+ __func__, di->bat_temp, resist, di->bat->fg_res / 10,
+ (di->bat->fg_res / 10) + resist);
+
+ /* fg_res variable is in 0.1mOhm */
+ resist += di->bat->fg_res / 10;
+
+ return resist;
+}
+
+/**
+ * ab8500_fg_load_comp_volt_to_capacity() - Load compensated voltage based capacity
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is load compensated
+ * for the voltage drop
+ */
+static int ab8500_fg_load_comp_volt_to_capacity(struct ab8500_fg *di)
+{
+ int vbat_comp, res;
+ int i = 0;
+ int vbat = 0;
+
+ ab8500_fg_inst_curr_start(di);
+
+ do {
+ vbat += ab8500_fg_bat_voltage(di);
+ i++;
+ msleep(5);
+ } while (!ab8500_fg_inst_curr_done(di));
+
+ ab8500_fg_inst_curr_finalize(di, &di->inst_curr);
+
+ di->vbat = vbat / i;
+ res = ab8500_fg_battery_resistance(di);
+
+ /* Use Ohms law to get the load compensated voltage */
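+ /*
+ * Illustrative example: with vbat = 3700 mV, inst_curr = -300 mA
+ * (discharge current is negative) and res = 200 mOhm, the
+ * compensated voltage is 3700 - (-300 * 200) / 1000 = 3760 mV.
+ */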
+ vbat_comp = di->vbat - (di->inst_curr * res) / 1000;
+
+ dev_dbg(di->dev, "%s Measured Vbat: %dmV,Compensated Vbat %dmV, "
+ "R: %dmOhm, Current: %dmA Vbat Samples: %d\n",
+ __func__, di->vbat, vbat_comp, res, di->inst_curr, i);
+
+ return ab8500_fg_volt_to_capacity(di, vbat_comp);
+}
+
+/**
+ * ab8500_fg_convert_mah_to_permille() - Capacity in mAh to permille
+ * @di: pointer to the ab8500_fg structure
+ * @cap_mah: capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in permille
+ */
+static int ab8500_fg_convert_mah_to_permille(struct ab8500_fg *di, int cap_mah)
+{
+ return (cap_mah * 1000) / di->bat_cap.max_mah_design;
+}
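+
+/*
+ * Illustrative example: with max_mah_design representing a full capacity
+ * of 1500000 and cap_mah = 750000 (in the same unit), the result is
+ * (750000 * 1000) / 1500000 = 500 permille, i.e. 50%.
+ */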
+
+/**
+ * ab8500_fg_convert_permille_to_mah() - Capacity in permille to mAh
+ * @di: pointer to the ab8500_fg structure
+ * @cap_pm: capacity in permille
+ *
+ * Converts capacity in permille to capacity in mAh
+ */
+static int ab8500_fg_convert_permille_to_mah(struct ab8500_fg *di, int cap_pm)
+{
+ return cap_pm * di->bat_cap.max_mah_design / 1000;
+}
+
+/**
+ * ab8500_fg_convert_mah_to_uwh() - Capacity in mAh to uWh
+ * @di: pointer to the ab8500_fg structure
+ * @cap_mah: capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in uWh
+ */
+static int ab8500_fg_convert_mah_to_uwh(struct ab8500_fg *di, int cap_mah)
+{
+ u64 div_res;
+ u32 div_rem;
+
+ div_res = ((u64) cap_mah) * ((u64) di->vbat_nom);
+ div_rem = do_div(div_res, 1000);
+
+ /* Make sure to round upwards if necessary */
+ if (div_rem >= 1000 / 2)
+ div_res++;
+
+ return (int) div_res;
+}
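+
+/*
+ * Illustrative example of the rounding: cap_mah = 1500 and
+ * vbat_nom = 3601 give div_res = 5401500; do_div() leaves a quotient
+ * of 5401 and a remainder of 500, which is >= 500, so the function
+ * returns 5402.
+ */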
+
+/**
+ * ab8500_fg_calc_cap_charging() - Calculate remaining capacity while charging
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Return the capacity in mAh based on the previously calculated capacity and
+ * the FG accumulator register value. The filter is filled with this capacity
+ */
+static int ab8500_fg_calc_cap_charging(struct ab8500_fg *di)
+{
+ dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+ __func__,
+ di->bat_cap.mah,
+ di->accu_charge);
+
+ /* Capacity should not be less than 0 */
+ if (di->bat_cap.mah + di->accu_charge > 0)
+ di->bat_cap.mah += di->accu_charge;
+ else
+ di->bat_cap.mah = 0;
+ /*
+ * We force capacity to 100% once when the algorithm
+ * reports that it's full.
+ */
+ if (di->bat_cap.mah >= di->bat_cap.max_mah_design ||
+ di->flags.force_full) {
+ di->bat_cap.mah = di->bat_cap.max_mah_design;
+ }
+
+ ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ di->bat_cap.permille =
+ ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+ /* We need to update battery voltage and inst current when charging */
+ di->vbat = ab8500_fg_bat_voltage(di);
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab8500_fg_calc_cap_discharge_voltage() - Capacity in discharge with voltage
+ * @di: pointer to the ab8500_fg structure
+ * @comp: if voltage should be load compensated before capacity calc
+ *
+ * Return the capacity in mAh based on the battery voltage. The voltage can
+ * either be load compensated or not. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab8500_fg_calc_cap_discharge_voltage(struct ab8500_fg *di, bool comp)
+{
+ int permille, mah;
+
+ if (comp)
+ permille = ab8500_fg_load_comp_volt_to_capacity(di);
+ else
+ permille = ab8500_fg_uncomp_volt_to_capacity(di);
+
+ mah = ab8500_fg_convert_permille_to_mah(di, permille);
+
+ di->bat_cap.mah = ab8500_fg_add_cap_sample(di, mah);
+ di->bat_cap.permille =
+ ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab8500_fg_calc_cap_discharge_fg() - Capacity in discharge with FG
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Return the capacity in mAh based on the previously calculated capacity and
+ * the FG accumulator register value. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab8500_fg_calc_cap_discharge_fg(struct ab8500_fg *di)
+{
+ int permille_volt, permille;
+
+ dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+ __func__,
+ di->bat_cap.mah,
+ di->accu_charge);
+
+ /* Capacity should not be less than 0 */
+ if (di->bat_cap.mah + di->accu_charge > 0)
+ di->bat_cap.mah += di->accu_charge;
+ else
+ di->bat_cap.mah = 0;
+
+ if (di->bat_cap.mah >= di->bat_cap.max_mah_design)
+ di->bat_cap.mah = di->bat_cap.max_mah_design;
+
+ /*
+ * Check against voltage based capacity. It cannot be lower
+ * than what the uncompensated voltage says
+ */
+ permille = ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+ permille_volt = ab8500_fg_uncomp_volt_to_capacity(di);
+
+ if (permille < permille_volt) {
+ di->bat_cap.permille = permille_volt;
+ di->bat_cap.mah = ab8500_fg_convert_permille_to_mah(di,
+ di->bat_cap.permille);
+
+ dev_dbg(di->dev, "%s voltage based: perm %d perm_volt %d\n",
+ __func__,
+ permille,
+ permille_volt);
+
+ ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ } else {
+ ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ di->bat_cap.permille =
+ ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+ }
+
+ return di->bat_cap.mah;
+}
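+
+/*
+ * Example of the clamping above: if the coulomb counter based capacity works
+ * out to 300 permille but the uncompensated voltage conversion reports 350
+ * permille, the voltage based value wins and the sample filter is refilled
+ * with the corresponding mAh value.
+ */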
+
+/**
+ * ab8500_fg_capacity_level() - Get the battery capacity level
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Get the battery capacity level based on the capacity in percent
+ */
+static int ab8500_fg_capacity_level(struct ab8500_fg *di)
+{
+ int ret, percent;
+
+ percent = di->bat_cap.permille / 10;
+
+ if (percent <= di->bat->cap_levels->critical ||
+ di->flags.low_bat)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else if (percent <= di->bat->cap_levels->low)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (percent <= di->bat->cap_levels->normal)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ else if (percent <= di->bat->cap_levels->high)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
+ else
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+
+ return ret;
+}
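+
+/*
+ * Example mapping (hypothetical platform data thresholds critical=2, low=10,
+ * normal=70, high=95, all in percent): a reading of 8% gives LEVEL_LOW, 50%
+ * gives LEVEL_NORMAL and 97% gives LEVEL_FULL. LEVEL_CRITICAL is also forced
+ * whenever the low_bat flag is set, regardless of the percentage.
+ */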
+
+/**
+ * ab8500_fg_check_capacity_limits() - Check if capacity has changed
+ * @di: pointer to the ab8500_fg structure
+ * @init: capacity is allowed to go up in init mode
+ *
+ * Check if capacity or capacity limit has changed and notify the system
+ * about it using the power_supply framework
+ */
+static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
+{
+ bool changed = false;
+
+ di->bat_cap.level = ab8500_fg_capacity_level(di);
+
+ if (di->bat_cap.level != di->bat_cap.prev_level) {
+ /*
+ * We do not allow reported capacity level to go up
+ * unless we're charging or if we're in init
+ */
+ if (!(!di->flags.charging && di->bat_cap.level >
+ di->bat_cap.prev_level) || init) {
+ dev_dbg(di->dev, "level changed from %d to %d\n",
+ di->bat_cap.prev_level,
+ di->bat_cap.level);
+ di->bat_cap.prev_level = di->bat_cap.level;
+ changed = true;
+ } else {
+ dev_dbg(di->dev, "level not allowed to go up "
+ "since no charger is connected: %d to %d\n",
+ di->bat_cap.prev_level,
+ di->bat_cap.level);
+ }
+ }
+
+ /*
+ * If we have received the LOW_BAT IRQ, set capacity to 0 to initiate
+ * shutdown
+ */
+ if (di->flags.low_bat) {
+ dev_dbg(di->dev, "Battery low, set capacity to 0\n");
+ di->bat_cap.prev_percent = 0;
+ di->bat_cap.permille = 0;
+ di->bat_cap.prev_mah = 0;
+ di->bat_cap.mah = 0;
+ changed = true;
+ } else if (di->flags.fully_charged) {
+ /*
+ * We report 100% if algorithm reported fully charged
+ * unless capacity drops too much
+ */
+ if (di->flags.force_full) {
+ di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ di->bat_cap.prev_mah = di->bat_cap.mah;
+ } else if (!di->flags.force_full &&
+ di->bat_cap.prev_percent !=
+ (di->bat_cap.permille) / 10 &&
+ (di->bat_cap.permille / 10) <
+ di->bat->fg_params->maint_thres) {
+ dev_dbg(di->dev,
+ "battery reported full "
+ "but capacity dropping: %d\n",
+ di->bat_cap.permille / 10);
+ di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ di->bat_cap.prev_mah = di->bat_cap.mah;
+
+ changed = true;
+ }
+ } else if (di->bat_cap.prev_percent != di->bat_cap.permille / 10) {
+ if (di->bat_cap.permille / 10 == 0) {
+ /*
+ * We will not report 0% unless we've got
+ * the LOW_BAT IRQ, no matter what the FG
+ * algorithm says.
+ */
+ di->bat_cap.prev_percent = 1;
+ di->bat_cap.permille = 1;
+ di->bat_cap.prev_mah = 1;
+ di->bat_cap.mah = 1;
+
+ changed = true;
+ } else if (!(!di->flags.charging &&
+ (di->bat_cap.permille / 10) >
+ di->bat_cap.prev_percent) || init) {
+ /*
+ * We do not allow reported capacity to go up
+ * unless we're charging or if we're in init
+ */
+ dev_dbg(di->dev,
+ "capacity changed from %d to %d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.permille / 10,
+ di->bat_cap.permille);
+ di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ di->bat_cap.prev_mah = di->bat_cap.mah;
+
+ changed = true;
+ } else {
+ dev_dbg(di->dev, "capacity not allowed to go up since "
+ "no charger is connected: %d to %d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.permille / 10,
+ di->bat_cap.permille);
+ }
+ }
+
+ if (changed) {
+ power_supply_changed(&di->fg_psy);
+ if (di->flags.fully_charged && di->flags.force_full) {
+ dev_dbg(di->dev, "Battery full, notifying.\n");
+ di->flags.force_full = false;
+ sysfs_notify(&di->fg_kobject, NULL, "charge_full");
+ }
+ sysfs_notify(&di->fg_kobject, NULL, "charge_now");
+ }
+}
+
+static void ab8500_fg_charge_state_to(struct ab8500_fg *di,
+ enum ab8500_fg_charge_state new_state)
+{
+ dev_dbg(di->dev, "Charge state from %d [%s] to %d [%s]\n",
+ di->charge_state,
+ charge_state[di->charge_state],
+ new_state,
+ charge_state[new_state]);
+
+ di->charge_state = new_state;
+}
+
+static void ab8500_fg_discharge_state_to(struct ab8500_fg *di,
+ enum ab8500_fg_discharge_state new_state)
+{
+ dev_dbg(di->dev, "Disharge state from %d [%s] to %d [%s]\n",
+ di->discharge_state,
+ discharge_state[di->discharge_state],
+ new_state,
+ discharge_state[new_state]);
+
+ di->discharge_state = new_state;
+}
+
+/**
+ * ab8500_fg_algorithm_charging() - FG algorithm for when charging
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're charging
+ */
+static void ab8500_fg_algorithm_charging(struct ab8500_fg *di)
+{
+ /*
+ * If we change to discharge mode
+ * we should start with recovery
+ */
+ if (di->discharge_state != AB8500_FG_DISCHARGE_INIT_RECOVERY)
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_INIT_RECOVERY);
+
+ switch (di->charge_state) {
+ case AB8500_FG_CHARGE_INIT:
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_charging);
+
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_READOUT);
+
+ break;
+
+ case AB8500_FG_CHARGE_READOUT:
+ /*
+ * Read the FG and calculate the new capacity
+ */
+ mutex_lock(&di->cc_lock);
+ if (!di->flags.conv_done) {
+ /* Wasn't the CC IRQ that got us here */
+ mutex_unlock(&di->cc_lock);
+ dev_dbg(di->dev, "%s CC conv not done\n",
+ __func__);
+
+ break;
+ }
+ di->flags.conv_done = false;
+ mutex_unlock(&di->cc_lock);
+
+ ab8500_fg_calc_cap_charging(di);
+
+ break;
+
+ default:
+ break;
+ }
+
+ /* Check capacity limits */
+ ab8500_fg_check_capacity_limits(di, false);
+}
+
+static void force_capacity(struct ab8500_fg *di)
+{
+ int cap;
+
+ ab8500_fg_clear_cap_samples(di);
+ cap = di->bat_cap.user_mah;
+ if (cap > di->bat_cap.max_mah_design) {
+ dev_dbg(di->dev, "Remaining cap %d can't be bigger than total"
+ " %d\n", cap, di->bat_cap.max_mah_design);
+ cap = di->bat_cap.max_mah_design;
+ }
+ ab8500_fg_fill_cap_sample(di, cap);
+ di->bat_cap.permille = ab8500_fg_convert_mah_to_permille(di, cap);
+ di->bat_cap.mah = cap;
+ ab8500_fg_check_capacity_limits(di, true);
+}
+
+static bool check_sysfs_capacity(struct ab8500_fg *di)
+{
+ int cap, lower, upper;
+ int cap_permille;
+
+ cap = di->bat_cap.user_mah;
+
+ cap_permille = ab8500_fg_convert_mah_to_permille(di,
+ di->bat_cap.user_mah);
+
+ lower = di->bat_cap.permille - di->bat->fg_params->user_cap_limit * 10;
+ upper = di->bat_cap.permille + di->bat->fg_params->user_cap_limit * 10;
+
+ if (lower < 0)
+ lower = 0;
+ /* 1000 is permille, -> 100 percent */
+ if (upper > 1000)
+ upper = 1000;
+
+ dev_dbg(di->dev, "Capacity limits:"
+ " (Lower: %d User: %d Upper: %d) [user: %d, was: %d]\n",
+ lower, cap_permille, upper, cap, di->bat_cap.mah);
+
+ /* If within limits, use the saved capacity and exit estimation...*/
+ if (cap_permille > lower && cap_permille < upper) {
+ dev_dbg(di->dev, "OK! Using users cap %d uAh now\n", cap);
+ force_capacity(di);
+ return true;
+ }
+ dev_dbg(di->dev, "Capacity from user out of limits, ignoring");
+ return false;
+}
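+
+/*
+ * Example of the acceptance window above (hypothetical numbers): with
+ * user_cap_limit = 15 (percent) and a current estimate of 600 permille, a
+ * user supplied capacity is accepted only if it converts to something
+ * strictly between 450 and 750 permille; anything outside is ignored and the
+ * driver keeps its own estimate.
+ */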
+
+/**
+ * ab8500_fg_algorithm_discharging() - FG algorithm for when discharging
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're discharging
+ */
+static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
+{
+ int sleep_time;
+
+ /* If we change to charge mode we should start with init */
+ if (di->charge_state != AB8500_FG_CHARGE_INIT)
+ ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
+
+ switch (di->discharge_state) {
+ case AB8500_FG_DISCHARGE_INIT:
+ /* We use the FG IRQ to work on */
+ di->init_cnt = 0;
+ di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_INITMEASURING);
+
+ /* Intentional fallthrough */
+ case AB8500_FG_DISCHARGE_INITMEASURING:
+ /*
+ * Discard a number of samples during startup.
+ * After that, use compensated voltage for a few
+ * samples to get an initial capacity.
+ * Then go to READOUT
+ */
+ sleep_time = di->bat->fg_params->init_timer;
+
+ /* Discard the first [x] seconds */
+ if (di->init_cnt >
+ di->bat->fg_params->init_discard_time) {
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+
+ ab8500_fg_check_capacity_limits(di, true);
+ }
+
+ di->init_cnt += sleep_time;
+ if (di->init_cnt > di->bat->fg_params->init_total_time)
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT_INIT);
+
+ break;
+
+ case AB8500_FG_DISCHARGE_INIT_RECOVERY:
+ di->recovery_cnt = 0;
+ di->recovery_needed = true;
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_RECOVERY);
+
+ /* Intentional fallthrough */
+
+ case AB8500_FG_DISCHARGE_RECOVERY:
+ sleep_time = di->bat->fg_params->recovery_sleep_timer;
+
+ /*
+ * We should check the power consumption
+ * If low, go to READOUT (after x min) or
+ * RECOVERY_SLEEP if time left.
+ * If high, go to READOUT
+ */
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+ if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
+ if (di->recovery_cnt >
+ di->bat->fg_params->recovery_total_time) {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT);
+ di->recovery_needed = false;
+ } else {
+ queue_delayed_work(di->fg_wq,
+ &di->fg_periodic_work,
+ sleep_time * HZ);
+ }
+ di->recovery_cnt += sleep_time;
+ } else {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT);
+ }
+ break;
+
+ case AB8500_FG_DISCHARGE_READOUT_INIT:
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT);
+ break;
+
+ case AB8500_FG_DISCHARGE_READOUT:
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+ if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
+ /* Detect mode change */
+ if (di->high_curr_mode) {
+ di->high_curr_mode = false;
+ di->high_curr_cnt = 0;
+ }
+
+ if (di->recovery_needed) {
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_RECOVERY);
+
+ queue_delayed_work(di->fg_wq,
+ &di->fg_periodic_work, 0);
+
+ break;
+ }
+
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+ } else {
+ mutex_lock(&di->cc_lock);
+ if (!di->flags.conv_done) {
+ /* Wasn't the CC IRQ that got us here */
+ mutex_unlock(&di->cc_lock);
+ dev_dbg(di->dev, "%s CC conv not done\n",
+ __func__);
+
+ break;
+ }
+ di->flags.conv_done = false;
+ mutex_unlock(&di->cc_lock);
+
+ /* Detect mode change */
+ if (!di->high_curr_mode) {
+ di->high_curr_mode = true;
+ di->high_curr_cnt = 0;
+ }
+
+ di->high_curr_cnt +=
+ di->bat->fg_params->accu_high_curr;
+ if (di->high_curr_cnt >
+ di->bat->fg_params->high_curr_time)
+ di->recovery_needed = true;
+
+ ab8500_fg_calc_cap_discharge_fg(di);
+ }
+
+ ab8500_fg_check_capacity_limits(di, false);
+
+ break;
+
+ case AB8500_FG_DISCHARGE_WAKEUP:
+ ab8500_fg_coulomb_counter(di, true);
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT);
+
+ ab8500_fg_check_capacity_limits(di, false);
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ab8500_fg_algorithm_calibrate() - Internal coulomb counter offset calibration
+ * @di: pointer to the ab8500_fg structure
+ *
+ */
+static void ab8500_fg_algorithm_calibrate(struct ab8500_fg *di)
+{
+ int ret;
+
+ switch (di->calib_state) {
+ case AB8500_FG_CALIB_INIT:
+ dev_dbg(di->dev, "Calibration ongoing...\n");
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+ CC_INT_CAL_N_AVG_MASK, CC_INT_CAL_SAMPLES_8);
+ if (ret < 0)
+ goto err;
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+ CC_INTAVGOFFSET_ENA, CC_INTAVGOFFSET_ENA);
+ if (ret < 0)
+ goto err;
+ di->calib_state = AB8500_FG_CALIB_WAIT;
+ break;
+ case AB8500_FG_CALIB_END:
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+ CC_MUXOFFSET, CC_MUXOFFSET);
+ if (ret < 0)
+ goto err;
+ di->flags.calibrate = false;
+ dev_dbg(di->dev, "Calibration done...\n");
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ break;
+ case AB8500_FG_CALIB_WAIT:
+ dev_dbg(di->dev, "Calibration WFI\n");
+ default:
+ break;
+ }
+ return;
+err:
+ /* Something went wrong; give up on the calibration */
+ dev_err(di->dev, "failed to calibrate the CC\n");
+ di->flags.calibrate = false;
+ di->calib_state = AB8500_FG_CALIB_INIT;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+}
+
+/**
+ * ab8500_fg_algorithm() - Entry point for the FG algorithm
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Entry point for the battery capacity calculation state machine
+ */
+static void ab8500_fg_algorithm(struct ab8500_fg *di)
+{
+ if (di->flags.calibrate)
+ ab8500_fg_algorithm_calibrate(di);
+ else {
+ if (di->flags.charging)
+ ab8500_fg_algorithm_charging(di);
+ else
+ ab8500_fg_algorithm_discharging(di);
+ }
+
+ dev_dbg(di->dev, "[FG_DATA] %d %d %d %d %d %d %d %d %d "
+ "%d %d %d %d %d %d %d\n",
+ di->bat_cap.max_mah_design,
+ di->bat_cap.mah,
+ di->bat_cap.permille,
+ di->bat_cap.level,
+ di->bat_cap.prev_mah,
+ di->bat_cap.prev_percent,
+ di->bat_cap.prev_level,
+ di->vbat,
+ di->inst_curr,
+ di->avg_curr,
+ di->accu_charge,
+ di->flags.charging,
+ di->charge_state,
+ di->discharge_state,
+ di->high_curr_mode,
+ di->recovery_needed);
+}
+
+/**
+ * ab8500_fg_periodic_work() - Run the FG state machine periodically
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for periodic work
+ */
+static void ab8500_fg_periodic_work(struct work_struct *work)
+{
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+ fg_periodic_work.work);
+
+ if (di->init_capacity) {
+ /* A dummy read that will return 0 */
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+ /* Get an initial capacity calculation */
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+ ab8500_fg_check_capacity_limits(di, true);
+ di->init_capacity = false;
+
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ } else if (di->flags.user_cap) {
+ if (check_sysfs_capacity(di)) {
+ ab8500_fg_check_capacity_limits(di, true);
+ if (di->flags.charging)
+ ab8500_fg_charge_state_to(di,
+ AB8500_FG_CHARGE_INIT);
+ else
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT_INIT);
+ }
+ di->flags.user_cap = false;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ } else
+ ab8500_fg_algorithm(di);
+
+}
+
+/**
+ * ab8500_fg_check_hw_failure_work() - Check OVV_BAT condition
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the OVV_BAT condition
+ */
+static void ab8500_fg_check_hw_failure_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+ fg_check_hw_failure_work.work);
+
+ /*
+ * If we have had a battery over-voltage situation,
+ * check ovv-bit to see if it should be reset.
+ */
+ if (di->flags.bat_ovv) {
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_STAT_REG,
+ &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if ((reg_value & BATT_OVV) != BATT_OVV) {
+ dev_dbg(di->dev, "Battery recovered from OVV\n");
+ di->flags.bat_ovv = false;
+ power_supply_changed(&di->fg_psy);
+ return;
+ }
+
+ /* Not yet recovered from ovv, reschedule this test */
+ queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work,
+ round_jiffies(HZ));
+ }
+}
+
+/**
+ * ab8500_fg_low_bat_work() - Check LOW_BAT condition
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the LOW_BAT condition
+ */
+static void ab8500_fg_low_bat_work(struct work_struct *work)
+{
+ int vbat;
+
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+ fg_low_bat_work.work);
+
+ vbat = ab8500_fg_bat_voltage(di);
+
+ /* Check if LOW_BAT still fulfilled */
+ if (vbat < di->bat->fg_params->lowbat_threshold) {
+ di->flags.low_bat = true;
+ dev_warn(di->dev, "Battery voltage still LOW\n");
+
+ /*
+ * We need to re-schedule this check to be able to detect
+ * if the voltage increases again during charging
+ */
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ } else {
+ di->flags.low_bat = false;
+ dev_warn(di->dev, "Battery voltage OK again\n");
+ }
+
+ /* This is needed to dispatch LOW_BAT */
+ ab8500_fg_check_capacity_limits(di, false);
+
+ /* Set this flag to check if LOW_BAT IRQ still occurs */
+ di->flags.low_bat_delay = false;
+}
+
+/**
+ * ab8500_fg_battok_calc() - calculate the bit pattern corresponding
+ * to the target voltage
+ * @di: pointer to the ab8500_fg structure
+ * @target: target voltage
+ *
+ * Returns the bit pattern closest to the target voltage.
+ * Valid return values are 0-BATT_OK_MAX_NR_INCREMENTS (0-14).
+ */
+
+static int ab8500_fg_battok_calc(struct ab8500_fg *di, int target)
+{
+ if (target > BATT_OK_MIN +
+ (BATT_OK_INCREMENT * BATT_OK_MAX_NR_INCREMENTS))
+ return BATT_OK_MAX_NR_INCREMENTS;
+ if (target < BATT_OK_MIN)
+ return 0;
+ return (target - BATT_OK_MIN) / BATT_OK_INCREMENT;
+}
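+
+/*
+ * Example (illustrative values; the actual ones come from the BATT_OK_*
+ * defines, here assumed to be BATT_OK_MIN = 2360 mV and
+ * BATT_OK_INCREMENT = 50 mV): a target of 2600 mV gives
+ * (2600 - 2360) / 50 = 4, anything below 2360 mV returns 0 and anything
+ * above the highest selectable level saturates at
+ * BATT_OK_MAX_NR_INCREMENTS.
+ */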
+
+/**
+ * ab8500_fg_battok_init_hw_register() - init the BattOk threshold levels
+ * @di: pointer to the ab8500_fg structure
+ *
+ */
+
+static int ab8500_fg_battok_init_hw_register(struct ab8500_fg *di)
+{
+ int selected;
+ int sel0;
+ int sel1;
+ int cbp_sel0;
+ int cbp_sel1;
+ int ret;
+ int new_val;
+
+ sel0 = di->bat->fg_params->battok_falling_th_sel0;
+ sel1 = di->bat->fg_params->battok_raising_th_sel1;
+
+ cbp_sel0 = ab8500_fg_battok_calc(di, sel0);
+ cbp_sel1 = ab8500_fg_battok_calc(di, sel1);
+
+ selected = BATT_OK_MIN + cbp_sel0 * BATT_OK_INCREMENT;
+
+ if (selected != sel0)
+ dev_warn(di->dev, "Invalid voltage step:%d, using %d %d\n",
+ sel0, selected, cbp_sel0);
+
+ selected = BATT_OK_MIN + cbp_sel1 * BATT_OK_INCREMENT;
+
+ if (selected != sel1)
+ dev_warn(di->dev, "Invalid voltage step:%d, using %d %d\n",
+ sel1, selected, cbp_sel1);
+
+ new_val = cbp_sel0 | (cbp_sel1 << 4);
+
+ dev_dbg(di->dev, "using: %x %d %d\n", new_val, cbp_sel0, cbp_sel1);
+ ret = abx500_set_register_interruptible(di->dev, AB8500_SYS_CTRL2_BLOCK,
+ AB8500_BATT_OK_REG, new_val);
+ return ret;
+}
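+
+/*
+ * The register written above packs the two selections into one byte: the
+ * falling threshold (sel0) ends up in the low nibble and the rising
+ * threshold (sel1) in the high nibble, e.g. cbp_sel0 = 4 and cbp_sel1 = 6
+ * gives new_val = 0x64.
+ */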
+
+/**
+ * ab8500_fg_instant_work() - Run the FG state machine instantly
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for instant work
+ */
+static void ab8500_fg_instant_work(struct work_struct *work)
+{
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg, fg_work);
+
+ ab8500_fg_algorithm(di);
+}
+
+/**
+ * ab8500_fg_cc_data_end_handler() - isr to signal the end of an instant current measurement
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_cc_data_end_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+ complete(&di->ab8500_fg_complete);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_cc_int_calib_handler() - isr for the coulomb counter internal calibration
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_cc_int_calib_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+ di->calib_state = AB8500_FG_CALIB_END;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_cc_convend_handler() - isr to get battery avg current.
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_cc_convend_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+
+ queue_work(di->fg_wq, &di->fg_acc_cur_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_batt_ovv_handler() - Battery OVV occurred
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_batt_ovv_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+
+ dev_dbg(di->dev, "Battery OVV\n");
+ di->flags.bat_ovv = true;
+ power_supply_changed(&di->fg_psy);
+
+ /* Schedule a new HW failure check */
+ queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_lowbatf_handler() - Battery voltage is below LOW threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_lowbatf_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+
+ if (!di->flags.low_bat_delay) {
+ dev_warn(di->dev, "Battery voltage is below LOW threshold\n");
+ di->flags.low_bat_delay = true;
+ /*
+ * Start a timer to check LOW_BAT again after some time
+ * This is done to avoid shutdown on single voltage dips
+ */
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_get_property() - get the fg properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * fg properties by reading the sysfs files.
+ * voltage_now: battery voltage
+ * current_now: battery instant current
+ * current_avg: battery average current
+ * charge_full_design: capacity where battery is considered full
+ * charge_now: remaining battery capacity in uAh
+ * capacity: capacity in percent
+ * capacity_level: capacity level
+ *
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_fg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_fg *di;
+
+ di = to_ab8500_fg_device_info(psy);
+
+ /*
+ * If battery is identified as unknown and charging of unknown
+ * batteries is disabled, we always report 100% capacity and
+ * capacity level UNKNOWN, since we can't calculate
+ * remaining capacity
+ */
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (di->flags.bat_ovv)
+ val->intval = BATT_OVV_VALUE * 1000;
+ else
+ val->intval = di->vbat * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = di->inst_curr * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = di->avg_curr * 1000;
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = ab8500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah_design);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ val->intval = ab8500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+ di->flags.batt_id_received)
+ val->intval = ab8500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah);
+ else
+ val->intval = ab8500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.prev_mah);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = di->bat_cap.max_mah_design;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = di->bat_cap.max_mah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+ di->flags.batt_id_received)
+ val->intval = di->bat_cap.max_mah;
+ else
+ val->intval = di->bat_cap.prev_mah;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+ di->flags.batt_id_received)
+ val->intval = 100;
+ else
+ val->intval = di->bat_cap.prev_percent;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+ di->flags.batt_id_received)
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ else
+ val->intval = di->bat_cap.prev_level;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab8500_fg *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab8500_fg_device_info(psy);
+
+ /*
+ * For all psy where the name of this driver
+ * appears in any supplied_to
+ */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ switch (ret.intval) {
+ case POWER_SUPPLY_STATUS_UNKNOWN:
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ case POWER_SUPPLY_STATUS_NOT_CHARGING:
+ if (!di->flags.charging)
+ break;
+ di->flags.charging = false;
+ di->flags.fully_charged = false;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ case POWER_SUPPLY_STATUS_FULL:
+ if (di->flags.fully_charged)
+ break;
+ di->flags.fully_charged = true;
+ di->flags.force_full = true;
+ /* Save current capacity as maximum */
+ di->bat_cap.max_mah = di->bat_cap.mah;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ case POWER_SUPPLY_STATUS_CHARGING:
+ if (di->flags.charging)
+ break;
+ di->flags.charging = true;
+ di->flags.fully_charged = false;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (!di->flags.batt_id_received) {
+ const struct abx500_battery_type *b;
+
+ b = &(di->bat->bat_type[di->bat->batt_id]);
+
+ di->flags.batt_id_received = true;
+
+ di->bat_cap.max_mah_design =
+ MILLI_TO_MICRO *
+ b->charge_full_design;
+
+ di->bat_cap.max_mah =
+ di->bat_cap.max_mah_design;
+
+ di->vbat_nom = b->nominal_voltage;
+ }
+
+ if (ret.intval)
+ di->flags.batt_unknown = false;
+ else
+ di->flags.batt_unknown = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (di->flags.batt_id_received)
+ di->bat_temp = ret.intval;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab8500_fg_init_hw_registers() - Set up FG related registers
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Set up battery OVV, low battery voltage registers
+ */
+static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
+{
+ int ret;
+
+ /* Set VBAT OVV threshold */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_BATT_OVV,
+ BATT_OVV_TH_4P75,
+ BATT_OVV_TH_4P75);
+ if (ret) {
+ dev_err(di->dev, "failed to set BATT_OVV\n");
+ goto out;
+ }
+
+ /* Enable VBAT OVV detection */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_BATT_OVV,
+ BATT_OVV_ENA,
+ BATT_OVV_ENA);
+ if (ret) {
+ dev_err(di->dev, "failed to enable BATT_OVV\n");
+ goto out;
+ }
+
+ /* Low Battery Voltage */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_SYS_CTRL2_BLOCK,
+ AB8500_LOW_BAT_REG,
+ ab8500_volt_to_regval(
+ di->bat->fg_params->lowbat_threshold) << 1 |
+ LOW_BAT_ENABLE);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ goto out;
+ }
+
+ /* Battery OK threshold */
+ ret = ab8500_fg_battok_init_hw_register(di);
+ if (ret) {
+ dev_err(di->dev, "BattOk init write failed.\n");
+ goto out;
+ }
+out:
+ return ret;
+}
+
+/**
+ * ab8500_fg_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is called through the external_power_changed callback
+ * in the power_supply structure.
+ * This function gets executed when there is a change in any external power
+ * supply that this driver needs to be notified of.
+ */
+static void ab8500_fg_external_power_changed(struct power_supply *psy)
+{
+ struct ab8500_fg *di = to_ab8500_fg_device_info(psy);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->fg_psy, ab8500_fg_get_ext_psy_data);
+}
+
+/**
+ * ab8500_fg_reinit_work() - work to reset the FG algorithm
+ * @work: pointer to the work_struct structure
+ *
+ * Used to reset the current battery capacity to be able to
+ * retrigger a new voltage based capacity calculation. For
+ * test and verification purposes.
+ */
+static void ab8500_fg_reinit_work(struct work_struct *work)
+{
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+ fg_reinit_work.work);
+
+ if (!di->flags.calibrate) {
+ dev_dbg(di->dev, "Resetting FG state machine to init.\n");
+ ab8500_fg_clear_cap_samples(di);
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+ ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
+ ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+
+ } else {
+ dev_err(di->dev, "Residual offset calibration ongoing "
+ "retrying..\n");
+ /* Wait one second until next try*/
+ queue_delayed_work(di->fg_wq, &di->fg_reinit_work,
+ round_jiffies(1));
+ }
+}
+
+/**
+ * ab8500_fg_reinit() - forces FG algorithm to reinitialize with current values
+ *
+ * This function can be used to force the FG algorithm to recalculate a new
+ * voltage based battery capacity.
+ */
+void ab8500_fg_reinit(void)
+{
+ struct ab8500_fg *di = ab8500_fg_get();
+ /* The caller is not notified if a NULL pointer is returned */
+ if (di != NULL)
+ queue_delayed_work(di->fg_wq, &di->fg_reinit_work, 0);
+}
+
+/* Exposure to the sysfs interface */
+
+struct ab8500_fg_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct ab8500_fg *, char *);
+ ssize_t (*store)(struct ab8500_fg *, const char *, size_t);
+};
+
+static ssize_t charge_full_show(struct ab8500_fg *di, char *buf)
+{
+ return sprintf(buf, "%d\n", di->bat_cap.max_mah);
+}
+
+static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf,
+ size_t count)
+{
+ unsigned long charge_full;
+ ssize_t ret = -EINVAL;
+
+ ret = strict_strtoul(buf, 10, &charge_full);
+
+ dev_dbg(di->dev, "Ret %zd charge_full %lu", ret, charge_full);
+
+ if (!ret) {
+ di->bat_cap.max_mah = (int) charge_full;
+ ret = count;
+ }
+ return ret;
+}
+
+static ssize_t charge_now_show(struct ab8500_fg *di, char *buf)
+{
+ return sprintf(buf, "%d\n", di->bat_cap.prev_mah);
+}
+
+static ssize_t charge_now_store(struct ab8500_fg *di, const char *buf,
+ size_t count)
+{
+ unsigned long charge_now;
+ ssize_t ret;
+
+ ret = strict_strtoul(buf, 10, &charge_now);
+
+ dev_dbg(di->dev, "Ret %zd charge_now %lu was %d",
+ ret, charge_now, di->bat_cap.prev_mah);
+
+ if (!ret) {
+ di->bat_cap.user_mah = (int) charge_now;
+ di->flags.user_cap = true;
+ ret = count;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ }
+ return ret;
+}
+
+static struct ab8500_fg_sysfs_entry charge_full_attr =
+ __ATTR(charge_full, 0644, charge_full_show, charge_full_store);
+
+static struct ab8500_fg_sysfs_entry charge_now_attr =
+ __ATTR(charge_now, 0644, charge_now_show, charge_now_store);
+
+static ssize_t
+ab8500_fg_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct ab8500_fg_sysfs_entry *entry;
+ struct ab8500_fg *di;
+
+ entry = container_of(attr, struct ab8500_fg_sysfs_entry, attr);
+ di = container_of(kobj, struct ab8500_fg, fg_kobject);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(di, buf);
+}
+static ssize_t
+ab8500_fg_store(struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ struct ab8500_fg_sysfs_entry *entry;
+ struct ab8500_fg *di;
+
+ entry = container_of(attr, struct ab8500_fg_sysfs_entry, attr);
+ di = container_of(kobj, struct ab8500_fg, fg_kobject);
+
+ if (!entry->store)
+ return -EIO;
+
+ return entry->store(di, buf, count);
+}
+
+static const struct sysfs_ops ab8500_fg_sysfs_ops = {
+ .show = ab8500_fg_show,
+ .store = ab8500_fg_store,
+};
+
+static struct attribute *ab8500_fg_attrs[] = {
+ &charge_full_attr.attr,
+ &charge_now_attr.attr,
+ NULL,
+};
+
+static struct kobj_type ab8500_fg_ktype = {
+ .sysfs_ops = &ab8500_fg_sysfs_ops,
+ .default_attrs = ab8500_fg_attrs,
+};
+
+/**
+ * ab8500_fg_sysfs_exit() - de-init of sysfs entry
+ * @di: pointer to the ab8500_fg structure
+ *
+ * This function removes the entry in sysfs.
+ */
+static void ab8500_fg_sysfs_exit(struct ab8500_fg *di)
+{
+ kobject_del(&di->fg_kobject);
+}
+
+/**
+ * ab8500_fg_sysfs_init() - init of sysfs entry
+ * @di: pointer to the ab8500_fg structure
+ *
+ * This function adds an entry in sysfs.
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_fg_sysfs_init(struct ab8500_fg *di)
+{
+ int ret = 0;
+
+ ret = kobject_init_and_add(&di->fg_kobject,
+ &ab8500_fg_ktype,
+ NULL, "battery");
+ if (ret < 0)
+ dev_err(di->dev, "failed to create sysfs entry\n");
+
+ return ret;
+}
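+
+/*
+ * Usage sketch (the sysfs path is an assumption; it depends on where the
+ * kobject ends up since it is added without a parent here): reading
+ * .../battery/charge_full returns the learned full capacity, and writing a
+ * capacity value to .../battery/charge_now, e.g.
+ * "echo 900000 > .../battery/charge_now", sets flags.user_cap so that the
+ * periodic work re-seeds the capacity filter with the user value, provided
+ * it passes check_sysfs_capacity().
+ */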
+/* Exposure to the sysfs interface <<END>> */
+
+#if defined(CONFIG_PM)
+static int ab8500_fg_resume(struct platform_device *pdev)
+{
+ struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+ /*
+ * Change state if we're not charging. If we're charging we will wake
+ * up on the FG IRQ
+ */
+ if (!di->flags.charging) {
+ ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_WAKEUP);
+ queue_work(di->fg_wq, &di->fg_work);
+ }
+
+ return 0;
+}
+
+static int ab8500_fg_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+ flush_delayed_work(&di->fg_periodic_work);
+
+ /*
+ * If the FG is enabled and we are not charging, disable it before
+ * going to suspend
+ */
+ if (di->flags.fg_enabled && !di->flags.charging)
+ ab8500_fg_coulomb_counter(di, false);
+
+ return 0;
+}
+#else
+#define ab8500_fg_suspend NULL
+#define ab8500_fg_resume NULL
+#endif
+
+static int __devexit ab8500_fg_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+ list_del(&di->node);
+
+ /* Disable coulomb counter */
+ ret = ab8500_fg_coulomb_counter(di, false);
+ if (ret)
+ dev_err(di->dev, "failed to disable coulomb counter\n");
+
+ destroy_workqueue(di->fg_wq);
+ ab8500_fg_sysfs_exit(di);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->fg_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+ return ret;
+}
+
+/* ab8500 fg driver interrupts and their respective isr */
+static struct ab8500_fg_interrupts ab8500_fg_irq[] = {
+ {"NCONV_ACCU", ab8500_fg_cc_convend_handler},
+ {"BATT_OVV", ab8500_fg_batt_ovv_handler},
+ {"LOW_BAT_F", ab8500_fg_lowbatf_handler},
+ {"CC_INT_CALIB", ab8500_fg_cc_int_calib_handler},
+ {"CCEOC", ab8500_fg_cc_data_end_handler},
+};
+
+static int __devinit ab8500_fg_probe(struct platform_device *pdev)
+{
+ int i, irq;
+ int ret = 0;
+ struct abx500_bm_plat_data *plat_data;
+
+ struct ab8500_fg *di =
+ kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ mutex_init(&di->cc_lock);
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ /* get fg specific platform data */
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->fg;
+ if (!di->pdata) {
+ dev_err(di->dev, "no fg platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* get battery specific platform data */
+ di->bat = plat_data->battery;
+ if (!di->bat) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ di->fg_psy.name = "ab8500_fg";
+ di->fg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->fg_psy.properties = ab8500_fg_props;
+ di->fg_psy.num_properties = ARRAY_SIZE(ab8500_fg_props);
+ di->fg_psy.get_property = ab8500_fg_get_property;
+ di->fg_psy.supplied_to = di->pdata->supplied_to;
+ di->fg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->fg_psy.external_power_changed = ab8500_fg_external_power_changed;
+
+ di->bat_cap.max_mah_design = MILLI_TO_MICRO *
+ di->bat->bat_type[di->bat->batt_id].charge_full_design;
+
+ di->bat_cap.max_mah = di->bat_cap.max_mah_design;
+
+ di->vbat_nom = di->bat->bat_type[di->bat->batt_id].nominal_voltage;
+
+ di->init_capacity = true;
+
+ ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
+ ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
+
+ /* Create a work queue for running the FG algorithm */
+ di->fg_wq = create_singlethread_workqueue("ab8500_fg_wq");
+ if (di->fg_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for running the fg algorithm instantly */
+ INIT_WORK(&di->fg_work, ab8500_fg_instant_work);
+
+ /* Init work for getting the battery accumulated current */
+ INIT_WORK(&di->fg_acc_cur_work, ab8500_fg_acc_cur_work);
+
+ /* Init work for reinitialising the fg algorithm */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work,
+ ab8500_fg_reinit_work);
+
+ /* Init delayed work to run the FG state machine periodically */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
+ ab8500_fg_periodic_work);
+
+ /* Work to check low battery condition */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
+ ab8500_fg_low_bat_work);
+
+ /* Init work for HW failure check */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_check_hw_failure_work,
+ ab8500_fg_check_hw_failure_work);
+
+ /* Initialize OVV, and other registers */
+ ret = ab8500_fg_init_hw_registers(di);
+ if (ret) {
+ dev_err(di->dev, "failed to initialize registers\n");
+ goto free_inst_curr_wq;
+ }
+
+ /* Consider battery unknown until we're informed otherwise */
+ di->flags.batt_unknown = true;
+ di->flags.batt_id_received = false;
+
+ /* Register FG power supply class */
+ ret = power_supply_register(di->dev, &di->fg_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register FG psy\n");
+ goto free_inst_curr_wq;
+ }
+
+ di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+ ab8500_fg_coulomb_counter(di, true);
+
+ /* Initialize completion used to notify completion of inst current */
+ init_completion(&di->ab8500_fg_complete);
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab8500_fg_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab8500_fg_irq[i].name, di);
+
+ if (ret != 0) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ , ab8500_fg_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab8500_fg_irq[i].name, irq, ret);
+ }
+ di->irq = platform_get_irq_byname(pdev, "CCEOC");
+ disable_irq(di->irq);
+
+ platform_set_drvdata(pdev, di);
+
+ ret = ab8500_fg_sysfs_init(di);
+ if (ret) {
+ dev_err(di->dev, "failed to create sysfs entry\n");
+ goto free_irq;
+ }
+
+ /* Calibrate the fg first time */
+ di->flags.calibrate = true;
+ di->calib_state = AB8500_FG_CALIB_INIT;
+
+ /* Use room temp as default value until we get an update from the driver. */
+ di->bat_temp = 210;
+
+ /* Run the FG algorithm */
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+
+ list_add_tail(&di->node, &ab8500_fg_list);
+
+ return ret;
+
+free_irq:
+ power_supply_unregister(&di->fg_psy);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
+ free_irq(irq, di);
+ }
+free_inst_curr_wq:
+ destroy_workqueue(di->fg_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab8500_fg_driver = {
+ .probe = ab8500_fg_probe,
+ .remove = __devexit_p(ab8500_fg_remove),
+ .suspend = ab8500_fg_suspend,
+ .resume = ab8500_fg_resume,
+ .driver = {
+ .name = "ab8500-fg",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_fg_init(void)
+{
+ return platform_driver_register(&ab8500_fg_driver);
+}
+
+static void __exit ab8500_fg_exit(void)
+{
+ platform_driver_unregister(&ab8500_fg_driver);
+}
+
+subsys_initcall_sync(ab8500_fg_init);
+module_exit(ab8500_fg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab8500-fg");
+MODULE_DESCRIPTION("AB8500 Fuel Gauge driver");
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
new file mode 100644
index 000000000000..804b88c760d6
--- /dev/null
+++ b/drivers/power/abx500_chargalg.c
@@ -0,0 +1,1921 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Charging algorithm driver for abx500 variants
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ * Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ux500_chargalg.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+
+/* Watchdog kick interval */
+#define CHG_WD_INTERVAL (6 * HZ)
+
+/* End-of-charge criteria counter */
+#define EOC_COND_CNT 10
+
+/* Recharge criteria counter */
+#define RCH_COND_CNT 3
+
+#define to_abx500_chargalg_device_info(x) container_of((x), \
+ struct abx500_chargalg, chargalg_psy)
+
+enum abx500_chargers {
+ NO_CHG,
+ AC_CHG,
+ USB_CHG,
+};
+
+struct abx500_chargalg_charger_info {
+ enum abx500_chargers conn_chg;
+ enum abx500_chargers prev_conn_chg;
+ enum abx500_chargers online_chg;
+ enum abx500_chargers prev_online_chg;
+ enum abx500_chargers charger_type;
+ bool usb_chg_ok;
+ bool ac_chg_ok;
+ int usb_volt;
+ int usb_curr;
+ int ac_volt;
+ int ac_curr;
+ int usb_vset;
+ int usb_iset;
+ int ac_vset;
+ int ac_iset;
+};
+
+struct abx500_chargalg_suspension_status {
+ bool suspended_change;
+ bool ac_suspended;
+ bool usb_suspended;
+};
+
+struct abx500_chargalg_battery_data {
+ int temp;
+ int volt;
+ int avg_curr;
+ int inst_curr;
+ int percent;
+};
+
+enum abx500_chargalg_states {
+ STATE_HANDHELD_INIT,
+ STATE_HANDHELD,
+ STATE_CHG_NOT_OK_INIT,
+ STATE_CHG_NOT_OK,
+ STATE_HW_TEMP_PROTECT_INIT,
+ STATE_HW_TEMP_PROTECT,
+ STATE_NORMAL_INIT,
+ STATE_NORMAL,
+ STATE_WAIT_FOR_RECHARGE_INIT,
+ STATE_WAIT_FOR_RECHARGE,
+ STATE_MAINTENANCE_A_INIT,
+ STATE_MAINTENANCE_A,
+ STATE_MAINTENANCE_B_INIT,
+ STATE_MAINTENANCE_B,
+ STATE_TEMP_UNDEROVER_INIT,
+ STATE_TEMP_UNDEROVER,
+ STATE_TEMP_LOWHIGH_INIT,
+ STATE_TEMP_LOWHIGH,
+ STATE_SUSPENDED_INIT,
+ STATE_SUSPENDED,
+ STATE_OVV_PROTECT_INIT,
+ STATE_OVV_PROTECT,
+ STATE_SAFETY_TIMER_EXPIRED_INIT,
+ STATE_SAFETY_TIMER_EXPIRED,
+ STATE_BATT_REMOVED_INIT,
+ STATE_BATT_REMOVED,
+ STATE_WD_EXPIRED_INIT,
+ STATE_WD_EXPIRED,
+};
+
+static const char *states[] = {
+ "HANDHELD_INIT",
+ "HANDHELD",
+ "CHG_NOT_OK_INIT",
+ "CHG_NOT_OK",
+ "HW_TEMP_PROTECT_INIT",
+ "HW_TEMP_PROTECT",
+ "NORMAL_INIT",
+ "NORMAL",
+ "WAIT_FOR_RECHARGE_INIT",
+ "WAIT_FOR_RECHARGE",
+ "MAINTENANCE_A_INIT",
+ "MAINTENANCE_A",
+ "MAINTENANCE_B_INIT",
+ "MAINTENANCE_B",
+ "TEMP_UNDEROVER_INIT",
+ "TEMP_UNDEROVER",
+ "TEMP_LOWHIGH_INIT",
+ "TEMP_LOWHIGH",
+ "SUSPENDED_INIT",
+ "SUSPENDED",
+ "OVV_PROTECT_INIT",
+ "OVV_PROTECT",
+ "SAFETY_TIMER_EXPIRED_INIT",
+ "SAFETY_TIMER_EXPIRED",
+ "BATT_REMOVED_INIT",
+ "BATT_REMOVED",
+ "WD_EXPIRED_INIT",
+ "WD_EXPIRED",
+};
+
+struct abx500_chargalg_events {
+ bool batt_unknown;
+ bool mainextchnotok;
+ bool batt_ovv;
+ bool batt_rem;
+ bool btemp_underover;
+ bool btemp_lowhigh;
+ bool main_thermal_prot;
+ bool usb_thermal_prot;
+ bool main_ovv;
+ bool vbus_ovv;
+ bool usbchargernotok;
+ bool safety_timer_expired;
+ bool maintenance_timer_expired;
+ bool ac_wd_expired;
+ bool usb_wd_expired;
+ bool ac_cv_active;
+ bool usb_cv_active;
+ bool vbus_collapsed;
+};
+
+/**
+ * struct abx500_charge_curr_maximization - Charger maximization parameters
+ * @original_iset: the non optimized/maximised charger current
+ * @current_iset: the charging current used at this moment
+ * @test_delta_i: the delta between the current we want to charge and the
+ * current that is really going into the battery
+ * @condition_cnt: number of iterations needed before a new charger current
+ * is set
+ * @max_current: maximum charger current
+ * @wait_cnt: to avoid too fast current step down in case of charger
+ * voltage collapse, we insert this delay between step
+ * down
+ * @level: tells in how many steps the charging current has been
+ * increased
+ */
+struct abx500_charge_curr_maximization {
+ int original_iset;
+ int current_iset;
+ int test_delta_i;
+ int condition_cnt;
+ int max_current;
+ int wait_cnt;
+ u8 level;
+};
+
+enum maxim_ret {
+ MAXIM_RET_NOACTION,
+ MAXIM_RET_CHANGE,
+ MAXIM_RET_IBAT_TOO_HIGH,
+};
+
+/**
+ * struct abx500_chargalg - abx500 Charging algorithm device information
+ * @dev: pointer to the structure device
+ * @charge_status: battery operating status
+ * @eoc_cnt: counter used to determine end-of_charge
+ * @rch_cnt: counter used to determine start of recharge
+ * @maintenance_chg: indicate if maintenance charge is active
+ * @t_hyst_norm: temperature hysteresis when the temperature has been
+ * over or under normal limits
+ * @t_hyst_lowhigh: temperature hysteresis when the temperature has been
+ * over or under the high or low limits
+ * @charge_state: current state of the charging algorithm
+ * @ccm: charging current maximization parameters
+ * @chg_info: information about connected charger types
+ * @batt_data: data of the battery
+ * @susp_status: current charger suspension status
+ * @pdata: pointer to the abx500_chargalg platform data
+ * @bat: pointer to the abx500_bm platform data
+ * @chargalg_psy: structure that holds the battery properties exposed by
+ * the charging algorithm
+ * @ac_chg: pointer to the ux500_charger structure for the AC charger
+ * @usb_chg: pointer to the ux500_charger structure for the USB charger
+ * @events: structure for information about events triggered
+ * @chargalg_wq: work queue for running the charging algorithm
+ * @chargalg_periodic_work: work to run the charging algorithm periodically
+ * @chargalg_wd_work: work to kick the charger watchdog periodically
+ * @chargalg_work: work to run the charging algorithm instantly
+ * @safety_timer: charging safety timer
+ * @maintenance_timer: maintenance charging timer
+ * @chargalg_kobject: structure of type kobject
+ */
+struct abx500_chargalg {
+ struct device *dev;
+ int charge_status;
+ int eoc_cnt;
+ int rch_cnt;
+ bool maintenance_chg;
+ int t_hyst_norm;
+ int t_hyst_lowhigh;
+ enum abx500_chargalg_states charge_state;
+ struct abx500_charge_curr_maximization ccm;
+ struct abx500_chargalg_charger_info chg_info;
+ struct abx500_chargalg_battery_data batt_data;
+ struct abx500_chargalg_suspension_status susp_status;
+ struct abx500_chargalg_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct power_supply chargalg_psy;
+ struct ux500_charger *ac_chg;
+ struct ux500_charger *usb_chg;
+ struct abx500_chargalg_events events;
+ struct workqueue_struct *chargalg_wq;
+ struct delayed_work chargalg_periodic_work;
+ struct delayed_work chargalg_wd_work;
+ struct work_struct chargalg_work;
+ struct timer_list safety_timer;
+ struct timer_list maintenance_timer;
+ struct kobject chargalg_kobject;
+};
+
+/* Main battery properties */
+static enum power_supply_property abx500_chargalg_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+/**
+ * abx500_chargalg_safety_timer_expired() - Expiration of the safety timer
+ * @data: pointer to the abx500_chargalg structure
+ *
+ * This function gets called when the safety timer for the charger
+ * expires
+ */
+static void abx500_chargalg_safety_timer_expired(unsigned long data)
+{
+ struct abx500_chargalg *di = (struct abx500_chargalg *) data;
+ dev_err(di->dev, "Safety timer expired\n");
+ di->events.safety_timer_expired = true;
+
+ /* Trigger execution of the algorithm instantly */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_maintenance_timer_expired() - Expiration of
+ * the maintenance timer
+ * @data: pointer to the abx500_chargalg structure
+ *
+ * This function gets called when the maintenance timer
+ * expires
+ */
+static void abx500_chargalg_maintenance_timer_expired(unsigned long data)
+{
+
+ struct abx500_chargalg *di = (struct abx500_chargalg *) data;
+ dev_dbg(di->dev, "Maintenance timer expired\n");
+ di->events.maintenance_timer_expired = true;
+
+ /* Trigger execution of the algorithm instantly */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_state_to() - Change charge state
+ * @di: pointer to the abx500_chargalg structure
+ * @state: the new state to change to
+ *
+ * This function gets called when a charge state change should occur
+ */
+static void abx500_chargalg_state_to(struct abx500_chargalg *di,
+ enum abx500_chargalg_states state)
+{
+ dev_dbg(di->dev,
+ "State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
+ di->charge_state == state ? "NO" : "YES",
+ di->charge_state,
+ states[di->charge_state],
+ state,
+ states[state]);
+
+ di->charge_state = state;
+}
+
+/**
+ * abx500_chargalg_check_charger_connection() - Check charger connection change
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This function will check if there is a change in the charger connection
+ * and change charge state accordingly. AC has precedence over USB.
+ */
+static int abx500_chargalg_check_charger_connection(struct abx500_chargalg *di)
+{
+ if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
+ di->susp_status.suspended_change) {
+ /*
+ * Charger state changed or suspension
+ * has changed since last update
+ */
+ if ((di->chg_info.conn_chg & AC_CHG) &&
+ !di->susp_status.ac_suspended) {
+ dev_dbg(di->dev, "Charging source is AC\n");
+ if (di->chg_info.charger_type != AC_CHG) {
+ di->chg_info.charger_type = AC_CHG;
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ }
+ } else if ((di->chg_info.conn_chg & USB_CHG) &&
+ !di->susp_status.usb_suspended) {
+ dev_dbg(di->dev, "Charging source is USB\n");
+ di->chg_info.charger_type = USB_CHG;
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ } else if (di->chg_info.conn_chg &&
+ (di->susp_status.ac_suspended ||
+ di->susp_status.usb_suspended)) {
+ dev_dbg(di->dev, "Charging is suspended\n");
+ di->chg_info.charger_type = NO_CHG;
+ abx500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
+ } else {
+ dev_dbg(di->dev, "Charging source is OFF\n");
+ di->chg_info.charger_type = NO_CHG;
+ abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+ }
+ di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
+ di->susp_status.suspended_change = false;
+ }
+ return di->chg_info.conn_chg;
+}
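+
+/*
+ * Note on the precedence above: conn_chg is treated as a bit mask, so when
+ * both chargers are connected (AC_CHG | USB_CHG) the AC branch is evaluated
+ * first and AC wins, unless AC charging has been suspended (ac_suspended),
+ * in which case the USB branch is taken instead.
+ */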
+
+/**
+ * abx500_chargalg_start_safety_timer() - Start charging safety timer
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The safety timer is used to avoid overcharging of old or bad batteries.
+ * There are different timers for AC and USB
+ */
+static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
+{
+ unsigned long timer_expiration = 0;
+
+ switch (di->chg_info.charger_type) {
+ case AC_CHG:
+ timer_expiration =
+ round_jiffies(jiffies +
+ (di->bat->main_safety_tmr_h * 3600 * HZ));
+ break;
+
+ case USB_CHG:
+ timer_expiration =
+ round_jiffies(jiffies +
+ (di->bat->usb_safety_tmr_h * 3600 * HZ));
+ break;
+
+ default:
+ dev_err(di->dev, "Unknown charger to charge from\n");
+ break;
+ }
+
+ di->events.safety_timer_expired = false;
+ di->safety_timer.expires = timer_expiration;
+ if (!timer_pending(&di->safety_timer))
+ add_timer(&di->safety_timer);
+ else
+ mod_timer(&di->safety_timer, timer_expiration);
+}
+
+/**
+ * abx500_chargalg_stop_safety_timer() - Stop charging safety timer
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The safety timer is stopped whenever the NORMAL state is exited
+ */
+static void abx500_chargalg_stop_safety_timer(struct abx500_chargalg *di)
+{
+ di->events.safety_timer_expired = false;
+ del_timer(&di->safety_timer);
+}
+
+/**
+ * abx500_chargalg_start_maintenance_timer() - Start charging maintenance timer
+ * @di: pointer to the abx500_chargalg structure
+ * @duration: duration of the maintenance timer in hours
+ *
+ * The maintenance timer is used to maintain the charge in the battery once
+ * the battery is considered full. These timers are chosen to match the
+ * discharge curve of the battery
+ */
+static void abx500_chargalg_start_maintenance_timer(struct abx500_chargalg *di,
+ int duration)
+{
+ unsigned long timer_expiration;
+
+ /* Convert from hours to jiffies */
+ timer_expiration = round_jiffies(jiffies + (duration * 3600 * HZ));
+
+ di->events.maintenance_timer_expired = false;
+ di->maintenance_timer.expires = timer_expiration;
+ if (!timer_pending(&di->maintenance_timer))
+ add_timer(&di->maintenance_timer);
+ else
+ mod_timer(&di->maintenance_timer, timer_expiration);
+}
+
+/**
+ * abx500_chargalg_stop_maintenance_timer() - Stop maintenance timer
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The maintenance timer is stopped whenever maintenance ends or when another
+ * state is entered
+ */
+static void abx500_chargalg_stop_maintenance_timer(struct abx500_chargalg *di)
+{
+ di->events.maintenance_timer_expired = false;
+ del_timer(&di->maintenance_timer);
+}
+
+/**
+ * abx500_chargalg_kick_watchdog() - Kick charger watchdog
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The charger watchdog has to be kicked periodically whenever the charger is
+ * on, otherwise the ABB will reset the system
+ */
+static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
+{
+ /* Check if charger exists and kick watchdog if charging */
+ if (di->ac_chg && di->ac_chg->ops.kick_wd &&
+ di->chg_info.online_chg & AC_CHG)
+ return di->ac_chg->ops.kick_wd(di->ac_chg);
+ else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
+ di->chg_info.online_chg & USB_CHG)
+ return di->usb_chg->ops.kick_wd(di->usb_chg);
+
+ return -ENXIO;
+}
+
+/**
+ * abx500_chargalg_ac_en() - Turn on/off the AC charger
+ * @di: pointer to the abx500_chargalg structure
+ * @enable: charger on/off
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * The AC charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int abx500_chargalg_ac_en(struct abx500_chargalg *di, int enable,
+ int vset, int iset)
+{
+ if (!di->ac_chg || !di->ac_chg->ops.enable)
+ return -ENXIO;
+
+ /* Select the maximum that both the charger and the battery support */
+ if (di->ac_chg->max_out_volt)
+ vset = min(vset, di->ac_chg->max_out_volt);
+ if (di->ac_chg->max_out_curr)
+ iset = min(iset, di->ac_chg->max_out_curr);
+
+ di->chg_info.ac_iset = iset;
+ di->chg_info.ac_vset = vset;
+
+ return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
+}
+
+/**
+ * abx500_chargalg_usb_en() - Turn on/off the USB charger
+ * @di: pointer to the abx500_chargalg structure
+ * @enable: charger on/off
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * The USB charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int abx500_chargalg_usb_en(struct abx500_chargalg *di, int enable,
+ int vset, int iset)
+{
+ if (!di->usb_chg || !di->usb_chg->ops.enable)
+ return -ENXIO;
+
+ /* Select the maximum that both the charger and the battery support */
+ if (di->usb_chg->max_out_volt)
+ vset = min(vset, di->usb_chg->max_out_volt);
+ if (di->usb_chg->max_out_curr)
+ iset = min(iset, di->usb_chg->max_out_curr);
+
+ di->chg_info.usb_iset = iset;
+ di->chg_info.usb_vset = vset;
+
+ return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
+}
+
+/**
+ * abx500_chargalg_update_chg_curr() - Update charger current
+ * @di: pointer to the abx500_chargalg structure
+ * @iset: requested charger output current
+ *
+ * The charger output current will be updated for the charger
+ * that is currently in use
+ */
+static int abx500_chargalg_update_chg_curr(struct abx500_chargalg *di,
+ int iset)
+{
+ /* Check if charger exists and update current if charging */
+ if (di->ac_chg && di->ac_chg->ops.update_curr &&
+ di->chg_info.charger_type & AC_CHG) {
+ /*
+ * Select the maximum that both the charger
+ * and the battery support
+ */
+ if (di->ac_chg->max_out_curr)
+ iset = min(iset, di->ac_chg->max_out_curr);
+
+ di->chg_info.ac_iset = iset;
+
+ return di->ac_chg->ops.update_curr(di->ac_chg, iset);
+ } else if (di->usb_chg && di->usb_chg->ops.update_curr &&
+ di->chg_info.charger_type & USB_CHG) {
+ /*
+ * Select the maximum that both the charger
+ * and the battery support
+ */
+ if (di->usb_chg->max_out_curr)
+ iset = min(iset, di->usb_chg->max_out_curr);
+
+ di->chg_info.usb_iset = iset;
+
+ return di->usb_chg->ops.update_curr(di->usb_chg, iset);
+ }
+
+ return -ENXIO;
+}
+
+/**
+ * abx500_chargalg_stop_charging() - Stop charging
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This function is called from any state where charging should be stopped.
+ * All charging is disabled and all status parameters and timers are changed
+ * accordingly
+ */
+static void abx500_chargalg_stop_charging(struct abx500_chargalg *di)
+{
+ abx500_chargalg_ac_en(di, false, 0, 0);
+ abx500_chargalg_usb_en(di, false, 0, 0);
+ abx500_chargalg_stop_safety_timer(di);
+ abx500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ di->maintenance_chg = false;
+ cancel_delayed_work(&di->chargalg_wd_work);
+ power_supply_changed(&di->chargalg_psy);
+}
+
+/**
+ * abx500_chargalg_hold_charging() - Pauses charging
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This function is called in the case where maintenance charging has been
+ * disabled and instead a battery voltage mode is entered to check when the
+ * battery voltage has reached a certain recharge voltage
+ */
+static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
+{
+ abx500_chargalg_ac_en(di, false, 0, 0);
+ abx500_chargalg_usb_en(di, false, 0, 0);
+ abx500_chargalg_stop_safety_timer(di);
+ abx500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ di->maintenance_chg = false;
+ cancel_delayed_work(&di->chargalg_wd_work);
+ power_supply_changed(&di->chargalg_psy);
+}
+
+/**
+ * abx500_chargalg_start_charging() - Start the charger
+ * @di: pointer to the abx500_chargalg structure
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * A charger will be enabled depending on the requested charger type that was
+ * detected previously.
+ */
+static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
+ int vset, int iset)
+{
+ switch (di->chg_info.charger_type) {
+ case AC_CHG:
+ dev_dbg(di->dev,
+ "AC parameters: Vset %d, Ich %d\n", vset, iset);
+ abx500_chargalg_usb_en(di, false, 0, 0);
+ abx500_chargalg_ac_en(di, true, vset, iset);
+ break;
+
+ case USB_CHG:
+ dev_dbg(di->dev,
+ "USB parameters: Vset %d, Ich %d\n", vset, iset);
+ abx500_chargalg_ac_en(di, false, 0, 0);
+ abx500_chargalg_usb_en(di, true, vset, iset);
+ break;
+
+ default:
+ dev_err(di->dev, "Unknown charger to charge from\n");
+ break;
+ }
+}
+
+/**
+ * abx500_chargalg_check_temp() - Check battery temperature ranges
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The battery temperature is checked against the predefined limits and the
+ * charge state is changed accordingly
+ */
+static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
+{
+ if (di->batt_data.temp > (di->bat->temp_low + di->t_hyst_norm) &&
+ di->batt_data.temp < (di->bat->temp_high - di->t_hyst_norm)) {
+ /* Temp OK! */
+ di->events.btemp_underover = false;
+ di->events.btemp_lowhigh = false;
+ di->t_hyst_norm = 0;
+ di->t_hyst_lowhigh = 0;
+ } else {
+ if (((di->batt_data.temp >= di->bat->temp_high) &&
+ (di->batt_data.temp <
+ (di->bat->temp_over - di->t_hyst_lowhigh))) ||
+ ((di->batt_data.temp >
+ (di->bat->temp_under + di->t_hyst_lowhigh)) &&
+ (di->batt_data.temp <= di->bat->temp_low))) {
+ /* Temperature mildly out of range */
+ di->events.btemp_underover = false;
+ di->events.btemp_lowhigh = true;
+ di->t_hyst_norm = di->bat->temp_hysteresis;
+ di->t_hyst_lowhigh = 0;
+ } else if (di->batt_data.temp <= di->bat->temp_under ||
+ di->batt_data.temp >= di->bat->temp_over) {
+ /* Temperature severely out of range */
+ di->events.btemp_underover = true;
+ di->events.btemp_lowhigh = false;
+ di->t_hyst_norm = 0;
+ di->t_hyst_lowhigh = di->bat->temp_hysteresis;
+ } else {
+ /* Within hysteresis */
+ dev_dbg(di->dev, "Within hysteresis limit temp: %d "
+ "hyst_lowhigh %d, hyst normal %d\n",
+ di->batt_data.temp, di->t_hyst_lowhigh,
+ di->t_hyst_norm);
+ }
+ }
+}
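/*
 * Editor's sketch (not part of the patch): the temperature band
 * classification performed by abx500_chargalg_check_temp() above, with
 * the hysteresis handling omitted for brevity. Thresholds correspond to
 * the temp_under/temp_low/temp_high/temp_over battery parameters.
 */
enum temp_band { TEMP_BAND_OK, TEMP_BAND_MINOR, TEMP_BAND_MAJOR };

static enum temp_band classify_temp(int temp, int temp_under, int temp_low,
				    int temp_high, int temp_over)
{
	if (temp <= temp_under || temp >= temp_over)
		return TEMP_BAND_MAJOR;	/* charging is stopped */
	if (temp <= temp_low || temp >= temp_high)
		return TEMP_BAND_MINOR;	/* reduced charge settings are used */
	return TEMP_BAND_OK;		/* normal charging is allowed */
}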
+
+/**
+ * abx500_chargalg_check_charger_voltage() - Check charger voltage
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * Charger voltage is checked against maximum limit
+ */
+static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
+{
+ if (di->chg_info.usb_volt > di->bat->chg_params->usb_volt_max)
+ di->chg_info.usb_chg_ok = false;
+ else
+ di->chg_info.usb_chg_ok = true;
+
+ if (di->chg_info.ac_volt > di->bat->chg_params->ac_volt_max)
+ di->chg_info.ac_chg_ok = false;
+ else
+ di->chg_info.ac_chg_ok = true;
+
+}
+
+/**
+ * abx500_chargalg_end_of_charge() - Check if end-of-charge criteria are fulfilled
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * End-of-charge criteria are fulfilled when the battery voltage is above a
+ * certain limit and the battery current is below a certain limit for a
+ * predefined number of consecutive seconds. If true, the battery is full
+ */
+static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
+{
+ if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+ di->charge_state == STATE_NORMAL &&
+ !di->maintenance_chg && (di->batt_data.volt >=
+ di->bat->bat_type[di->bat->batt_id].termination_vol ||
+ di->events.usb_cv_active || di->events.ac_cv_active) &&
+ di->batt_data.avg_curr <
+ di->bat->bat_type[di->bat->batt_id].termination_curr &&
+ di->batt_data.avg_curr > 0) {
+ if (++di->eoc_cnt >= EOC_COND_CNT) {
+ di->eoc_cnt = 0;
+ di->charge_status = POWER_SUPPLY_STATUS_FULL;
+ di->maintenance_chg = true;
+ dev_dbg(di->dev, "EOC reached!\n");
+ power_supply_changed(&di->chargalg_psy);
+ } else {
+ dev_dbg(di->dev,
+ " EOC limit reached for the %d"
+ " time, out of %d before EOC\n",
+ di->eoc_cnt,
+ EOC_COND_CNT);
+ }
+ } else {
+ di->eoc_cnt = 0;
+ }
+}
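/*
 * Editor's sketch (not part of the patch) of the end-of-charge debounce
 * used above: "battery full" is only declared once the termination
 * condition has held for EOC_COND_CNT consecutive evaluations. The names
 * and the count value below are illustrative.
 */
#include <stdbool.h>

#define EOC_COND_CNT 10

static int eoc_cnt;

static bool eoc_reached(int volt, int avg_curr,
			int termination_vol, int termination_curr)
{
	if (volt >= termination_vol &&
	    avg_curr > 0 && avg_curr < termination_curr) {
		if (++eoc_cnt >= EOC_COND_CNT) {
			eoc_cnt = 0;
			return true;	/* battery considered full */
		}
	} else {
		eoc_cnt = 0;	/* condition broken, start counting again */
	}
	return false;
}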
+
+static void init_maxim_chg_curr(struct abx500_chargalg *di)
+{
+ di->ccm.original_iset =
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+ di->ccm.current_iset =
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+ di->ccm.test_delta_i = di->bat->maxi->charger_curr_step;
+ di->ccm.max_current = di->bat->maxi->chg_curr;
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.level = 0;
+}
+
+/**
+ * abx500_chargalg_chg_curr_maxim() - Increase the charger current to
+ * compensate for the system load
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This maximization function is used to raise the charger current to get the
+ * battery current as close to the optimal value as possible. The battery
+ * current during charging is affected by the system load
+ */
+static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
+{
+ int delta_i;
+
+ if (!di->bat->maxi->ena_maxi)
+ return MAXIM_RET_NOACTION;
+
+ delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
+
+ if (di->events.vbus_collapsed) {
+ dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
+ di->ccm.wait_cnt);
+ if (di->ccm.wait_cnt == 0) {
+ dev_dbg(di->dev, "lowering current\n");
+ di->ccm.wait_cnt++;
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.max_current =
+ di->ccm.current_iset - di->ccm.test_delta_i;
+ di->ccm.current_iset = di->ccm.max_current;
+ di->ccm.level--;
+ return MAXIM_RET_CHANGE;
+ } else {
+ dev_dbg(di->dev, "waiting\n");
+ /* Let's go in here twice before lowering curr again */
+ di->ccm.wait_cnt = (di->ccm.wait_cnt + 1) % 3;
+ return MAXIM_RET_NOACTION;
+ }
+ }
+
+ di->ccm.wait_cnt = 0;
+
+ if ((di->batt_data.inst_curr > di->ccm.original_iset)) {
+ dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
+ " (limit %dmA) (current iset: %dmA)!\n",
+ di->batt_data.inst_curr, di->ccm.original_iset,
+ di->ccm.current_iset);
+
+ if (di->ccm.current_iset == di->ccm.original_iset)
+ return MAXIM_RET_NOACTION;
+
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.current_iset = di->ccm.original_iset;
+ di->ccm.level = 0;
+
+ return MAXIM_RET_IBAT_TOO_HIGH;
+ }
+
+ if (delta_i > di->ccm.test_delta_i &&
+ (di->ccm.current_iset + di->ccm.test_delta_i) <
+ di->ccm.max_current) {
+ if (di->ccm.condition_cnt-- == 0) {
+ /* Increase iset by ccm.test_delta_i */
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.current_iset += di->ccm.test_delta_i;
+ di->ccm.level++;
+ dev_dbg(di->dev, " Maximization needed, increase"
+ " with %d mA to %dmA (Optimal ibat: %d)"
+ " Level %d\n",
+ di->ccm.test_delta_i,
+ di->ccm.current_iset,
+ di->ccm.original_iset,
+ di->ccm.level);
+ return MAXIM_RET_CHANGE;
+ } else {
+ return MAXIM_RET_NOACTION;
+ }
+ } else {
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ return MAXIM_RET_NOACTION;
+ }
+}
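/*
 * Editor's sketch (not part of the patch) of a single maximization step:
 * when the measured battery current is more than one step below the
 * optimal value, the charger output is raised by one step, capped at the
 * maximum allowed current. The wait-cycle counter and the VBUS-collapse
 * handling of the function above are omitted; parameter names are
 * illustrative.
 */
static int maximize_chg_curr(int current_iset, int inst_curr,
			     int original_iset, int test_delta_i,
			     int max_current)
{
	int delta_i = original_iset - inst_curr;

	if (delta_i > test_delta_i &&
	    current_iset + test_delta_i < max_current)
		current_iset += test_delta_i;	/* compensate system load */

	return current_iset;
}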
+
+static void handle_maxim_chg_curr(struct abx500_chargalg *di)
+{
+ enum maxim_ret ret;
+ int result;
+
+ ret = abx500_chargalg_chg_curr_maxim(di);
+ switch (ret) {
+ case MAXIM_RET_CHANGE:
+ result = abx500_chargalg_update_chg_curr(di,
+ di->ccm.current_iset);
+ if (result)
+ dev_err(di->dev, "failed to set chg curr\n");
+ break;
+ case MAXIM_RET_IBAT_TOO_HIGH:
+ result = abx500_chargalg_update_chg_curr(di,
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+ if (result)
+ dev_err(di->dev, "failed to set chg curr\n");
+ break;
+
+ case MAXIM_RET_NOACTION:
+ default:
+ /* Do nothing..*/
+ break;
+ }
+}
+
+static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct abx500_chargalg *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_abx500_chargalg_device_info(psy);
+ /* For all psy where the driver name appears in any supplied_to */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ /* Initialize chargers if not already done */
+ if (!di->ac_chg &&
+ ext->type == POWER_SUPPLY_TYPE_MAINS)
+ di->ac_chg = psy_to_ux500_charger(ext);
+ else if (!di->usb_chg &&
+ ext->type == POWER_SUPPLY_TYPE_USB)
+ di->usb_chg = psy_to_ux500_charger(ext);
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ /* Battery present */
+ if (ret.intval)
+ di->events.batt_rem = false;
+ /* Battery removed */
+ else
+ di->events.batt_rem = true;
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC disconnected */
+ if (!ret.intval &&
+ (di->chg_info.conn_chg & AC_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg &= ~AC_CHG;
+ }
+ /* AC connected */
+ else if (ret.intval &&
+ !(di->chg_info.conn_chg & AC_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg |= AC_CHG;
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB disconnected */
+ if (!ret.intval &&
+ (di->chg_info.conn_chg & USB_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg &= ~USB_CHG;
+ }
+ /* USB connected */
+ else if (ret.intval &&
+ !(di->chg_info.conn_chg & USB_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg |= USB_CHG;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_ONLINE:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC offline */
+ if (!ret.intval &&
+ (di->chg_info.online_chg & AC_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg &= ~AC_CHG;
+ }
+ /* AC online */
+ else if (ret.intval &&
+ !(di->chg_info.online_chg & AC_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg |= AC_CHG;
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, 0);
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB offline */
+ if (!ret.intval &&
+ (di->chg_info.online_chg & USB_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg &= ~USB_CHG;
+ }
+ /* USB online */
+ else if (ret.intval &&
+ !(di->chg_info.online_chg & USB_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg |= USB_CHG;
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ switch (ret.intval) {
+ case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+ di->events.mainextchnotok = true;
+ di->events.main_thermal_prot = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_DEAD:
+ di->events.ac_wd_expired = true;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.main_thermal_prot = false;
+ break;
+ case POWER_SUPPLY_HEALTH_COLD:
+ case POWER_SUPPLY_HEALTH_OVERHEAT:
+ di->events.main_thermal_prot = true;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+ di->events.main_ovv = true;
+ di->events.mainextchnotok = false;
+ di->events.main_thermal_prot = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_GOOD:
+ di->events.main_thermal_prot = false;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_TYPE_USB:
+ switch (ret.intval) {
+ case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+ di->events.usbchargernotok = true;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_DEAD:
+ di->events.usb_wd_expired = true;
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ break;
+ case POWER_SUPPLY_HEALTH_COLD:
+ case POWER_SUPPLY_HEALTH_OVERHEAT:
+ di->events.usb_thermal_prot = true;
+ di->events.usbchargernotok = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+ di->events.vbus_ovv = true;
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_GOOD:
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.volt = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ di->chg_info.ac_volt = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ di->chg_info.usb_volt = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AVG is used to indicate when we are
+ * in CV mode */
+ if (ret.intval)
+ di->events.ac_cv_active = true;
+ else
+ di->events.ac_cv_active = false;
+
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* AVG is used to indicate when we are
+ * in CV mode */
+ if (ret.intval)
+ di->events.usb_cv_active = true;
+ else
+ di->events.usb_cv_active = false;
+
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (ret.intval)
+ di->events.batt_unknown = false;
+ else
+ di->events.batt_unknown = true;
+
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_TEMP:
+ di->batt_data.temp = ret.intval / 10;
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ di->chg_info.ac_curr =
+ ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ di->chg_info.usb_curr =
+ ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.inst_curr = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.avg_curr = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ if (ret.intval)
+ di->events.vbus_collapsed = true;
+ else
+ di->events.vbus_collapsed = false;
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ di->batt_data.percent = ret.intval;
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
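/*
 * Editor's note (illustrative, not from the patch): the supplied_to name
 * check at the top of the callback above means an external supply is only
 * considered if it lists this driver's psy name, for example in the
 * charger driver:
 *
 *	static char *supplied_to[] = { "abx500_chargalg" };
 *
 *	psy->supplied_to = supplied_to;
 *	psy->num_supplicants = ARRAY_SIZE(supplied_to);
 */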
+
+/**
+ * abx500_chargalg_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is the callback assigned to the external_power_changed
+ * member of the power_supply structure.
+ * It gets executed when there is a change in any external power
+ * supply that this driver needs to be notified of.
+ */
+static void abx500_chargalg_external_power_changed(struct power_supply *psy)
+{
+ struct abx500_chargalg *di = to_abx500_chargalg_device_info(psy);
+
+ /*
+ * Trigger execution of the algorithm instantly and read
+ * all power_supply properties there instead
+ */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_algorithm() - Main function for the algorithm
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This is the main control function for the charging algorithm.
+ * It is called periodically or when something happens that will
+ * trigger a state change
+ */
+static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
+{
+ int charger_status;
+
+ /* Collect data from all power_supply class devices */
+ class_for_each_device(power_supply_class, NULL,
+ &di->chargalg_psy, abx500_chargalg_get_ext_psy_data);
+
+ abx500_chargalg_end_of_charge(di);
+ abx500_chargalg_check_temp(di);
+ abx500_chargalg_check_charger_voltage(di);
+
+ charger_status = abx500_chargalg_check_charger_connection(di);
+ /*
+ * First check if we have a charger connected.
+ * Also we don't allow charging of unknown batteries if configured
+ * this way
+ */
+ if (!charger_status ||
+ (di->events.batt_unknown && !di->bat->chg_unknown_bat)) {
+ if (di->charge_state != STATE_HANDHELD) {
+ di->events.safety_timer_expired = false;
+ abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+ }
+ }
+
+ /* If suspended, we should not continue checking the flags */
+ else if (di->charge_state == STATE_SUSPENDED_INIT ||
+ di->charge_state == STATE_SUSPENDED) {
+ /* We don't do anything here, just don't continue */
+ }
+
+ /* Safety timer expiration */
+ else if (di->events.safety_timer_expired) {
+ if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
+ abx500_chargalg_state_to(di,
+ STATE_SAFETY_TIMER_EXPIRED_INIT);
+ }
+ /*
+ * Check if any interrupts have occurred
+ * that will prevent us from charging
+ */
+
+ /* Battery removed */
+ else if (di->events.batt_rem) {
+ if (di->charge_state != STATE_BATT_REMOVED)
+ abx500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
+ }
+ /* Main or USB charger not ok. */
+ else if (di->events.mainextchnotok || di->events.usbchargernotok) {
+ /*
+ * If vbus_collapsed is set, we have to lower the charger
+ * current, which is done in the normal state below
+ */
+ if (di->charge_state != STATE_CHG_NOT_OK &&
+ !di->events.vbus_collapsed)
+ abx500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
+ }
+ /* VBUS, Main or VBAT OVV. */
+ else if (di->events.vbus_ovv ||
+ di->events.main_ovv ||
+ di->events.batt_ovv ||
+ !di->chg_info.usb_chg_ok ||
+ !di->chg_info.ac_chg_ok) {
+ if (di->charge_state != STATE_OVV_PROTECT)
+ abx500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
+ }
+ /* USB Thermal, stop charging */
+ else if (di->events.main_thermal_prot ||
+ di->events.usb_thermal_prot) {
+ if (di->charge_state != STATE_HW_TEMP_PROTECT)
+ abx500_chargalg_state_to(di,
+ STATE_HW_TEMP_PROTECT_INIT);
+ }
+ /* Battery temp over/under */
+ else if (di->events.btemp_underover) {
+ if (di->charge_state != STATE_TEMP_UNDEROVER)
+ abx500_chargalg_state_to(di,
+ STATE_TEMP_UNDEROVER_INIT);
+ }
+ /* Watchdog expired */
+ else if (di->events.ac_wd_expired ||
+ di->events.usb_wd_expired) {
+ if (di->charge_state != STATE_WD_EXPIRED)
+ abx500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
+ }
+ /* Battery temp high/low */
+ else if (di->events.btemp_lowhigh) {
+ if (di->charge_state != STATE_TEMP_LOWHIGH)
+ abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
+ }
+
+ dev_dbg(di->dev,
+ "[CHARGALG] Vb %d Ib_avg %d Ib_inst %d Tb %d Cap %d Maint %d "
+ "State %s Active_chg %d Chg_status %d AC %d USB %d "
+ "AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
+ "USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
+ di->batt_data.volt,
+ di->batt_data.avg_curr,
+ di->batt_data.inst_curr,
+ di->batt_data.temp,
+ di->batt_data.percent,
+ di->maintenance_chg,
+ states[di->charge_state],
+ di->chg_info.charger_type,
+ di->charge_status,
+ di->chg_info.conn_chg & AC_CHG,
+ di->chg_info.conn_chg & USB_CHG,
+ di->chg_info.online_chg & AC_CHG,
+ di->chg_info.online_chg & USB_CHG,
+ di->events.ac_cv_active,
+ di->events.usb_cv_active,
+ di->chg_info.ac_curr,
+ di->chg_info.usb_curr,
+ di->chg_info.ac_vset,
+ di->chg_info.ac_iset,
+ di->chg_info.usb_vset,
+ di->chg_info.usb_iset);
+
+ switch (di->charge_state) {
+ case STATE_HANDHELD_INIT:
+ abx500_chargalg_stop_charging(di);
+ di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ abx500_chargalg_state_to(di, STATE_HANDHELD);
+ /* Intentional fallthrough */
+
+ case STATE_HANDHELD:
+ break;
+
+ case STATE_SUSPENDED_INIT:
+ if (di->susp_status.ac_suspended)
+ abx500_chargalg_ac_en(di, false, 0, 0);
+ if (di->susp_status.usb_suspended)
+ abx500_chargalg_usb_en(di, false, 0, 0);
+ abx500_chargalg_stop_safety_timer(di);
+ abx500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ di->maintenance_chg = false;
+ abx500_chargalg_state_to(di, STATE_SUSPENDED);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_SUSPENDED:
+ /* CHARGING is suspended */
+ break;
+
+ case STATE_BATT_REMOVED_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_BATT_REMOVED);
+ /* Intentional fallthrough */
+
+ case STATE_BATT_REMOVED:
+ if (!di->events.batt_rem)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_HW_TEMP_PROTECT_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
+ /* Intentional fallthrough */
+
+ case STATE_HW_TEMP_PROTECT:
+ if (!di->events.main_thermal_prot &&
+ !di->events.usb_thermal_prot)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_OVV_PROTECT_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_OVV_PROTECT);
+ /* Intentional fallthrough */
+
+ case STATE_OVV_PROTECT:
+ if (!di->events.vbus_ovv &&
+ !di->events.main_ovv &&
+ !di->events.batt_ovv &&
+ di->chg_info.usb_chg_ok &&
+ di->chg_info.ac_chg_ok)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_CHG_NOT_OK_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_CHG_NOT_OK);
+ /* Intentional fallthrough */
+
+ case STATE_CHG_NOT_OK:
+ if (!di->events.mainextchnotok &&
+ !di->events.usbchargernotok)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_SAFETY_TIMER_EXPIRED_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
+ /* Intentional fallthrough */
+
+ case STATE_SAFETY_TIMER_EXPIRED:
+ /* We exit this state when charger is removed */
+ break;
+
+ case STATE_NORMAL_INIT:
+ abx500_chargalg_start_charging(di,
+ di->bat->bat_type[di->bat->batt_id].normal_vol_lvl,
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+ abx500_chargalg_state_to(di, STATE_NORMAL);
+ abx500_chargalg_start_safety_timer(di);
+ abx500_chargalg_stop_maintenance_timer(di);
+ init_maxim_chg_curr(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ di->eoc_cnt = 0;
+ di->maintenance_chg = false;
+ power_supply_changed(&di->chargalg_psy);
+
+ break;
+
+ case STATE_NORMAL:
+ handle_maxim_chg_curr(di);
+ if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
+ di->maintenance_chg) {
+ if (di->bat->no_maintenance)
+ abx500_chargalg_state_to(di,
+ STATE_WAIT_FOR_RECHARGE_INIT);
+ else
+ abx500_chargalg_state_to(di,
+ STATE_MAINTENANCE_A_INIT);
+ }
+ break;
+
+ /* This state will be used when the maintenance state is disabled */
+ case STATE_WAIT_FOR_RECHARGE_INIT:
+ abx500_chargalg_hold_charging(di);
+ abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
+ di->rch_cnt = RCH_COND_CNT;
+ /* Intentional fallthrough */
+
+ case STATE_WAIT_FOR_RECHARGE:
+ if (di->batt_data.volt <=
+ di->bat->bat_type[di->bat->batt_id].recharge_vol) {
+ if (di->rch_cnt-- == 0)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ } else
+ di->rch_cnt = RCH_COND_CNT;
+ break;
+
+ case STATE_MAINTENANCE_A_INIT:
+ abx500_chargalg_stop_safety_timer(di);
+ abx500_chargalg_start_maintenance_timer(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_chg_timer_h);
+ abx500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_cur_lvl);
+ abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_MAINTENANCE_A:
+ if (di->events.maintenance_timer_expired) {
+ abx500_chargalg_stop_maintenance_timer(di);
+ abx500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
+ }
+ break;
+
+ case STATE_MAINTENANCE_B_INIT:
+ abx500_chargalg_start_maintenance_timer(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_chg_timer_h);
+ abx500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_cur_lvl);
+ abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_MAINTENANCE_B:
+ if (di->events.maintenance_timer_expired) {
+ abx500_chargalg_stop_maintenance_timer(di);
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ }
+ break;
+
+ case STATE_TEMP_LOWHIGH_INIT:
+ abx500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].low_high_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].low_high_cur_lvl);
+ abx500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_TEMP_LOWHIGH:
+ if (!di->events.btemp_lowhigh)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_WD_EXPIRED_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_WD_EXPIRED);
+ /* Intentional fallthrough */
+
+ case STATE_WD_EXPIRED:
+ if (!di->events.ac_wd_expired &&
+ !di->events.usb_wd_expired)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_TEMP_UNDEROVER_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
+ /* Intentional fallthrough */
+
+ case STATE_TEMP_UNDEROVER:
+ if (!di->events.btemp_underover)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+ }
+
+ /* Start charging directly if the new state is a charge state */
+ if (di->charge_state == STATE_NORMAL_INIT ||
+ di->charge_state == STATE_MAINTENANCE_A_INIT ||
+ di->charge_state == STATE_MAINTENANCE_B_INIT)
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_periodic_work() - Periodic work for the algorithm
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for the charging algorithm
+ */
+static void abx500_chargalg_periodic_work(struct work_struct *work)
+{
+ struct abx500_chargalg *di = container_of(work,
+ struct abx500_chargalg, chargalg_periodic_work.work);
+
+ abx500_chargalg_algorithm(di);
+
+ /*
+ * If a charger is connected then the battery has to be monitored
+ * frequently, else the work can be delayed.
+ */
+ if (di->chg_info.conn_chg)
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_periodic_work,
+ di->bat->interval_charging * HZ);
+ else
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_periodic_work,
+ di->bat->interval_not_charging * HZ);
+}
+
+/**
+ * abx500_chargalg_wd_work() - periodic work to kick the charger watchdog
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for kicking the charger watchdog
+ */
+static void abx500_chargalg_wd_work(struct work_struct *work)
+{
+ int ret;
+ struct abx500_chargalg *di = container_of(work,
+ struct abx500_chargalg, chargalg_wd_work.work);
+
+ dev_dbg(di->dev, "abx500_chargalg_wd_work\n");
+
+ ret = abx500_chargalg_kick_watchdog(di);
+ if (ret < 0)
+ dev_err(di->dev, "failed to kick watchdog\n");
+
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, CHG_WD_INTERVAL);
+}
+
+/**
+ * abx500_chargalg_work() - Work to run the charging algorithm instantly
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for calling the charging algorithm
+ */
+static void abx500_chargalg_work(struct work_struct *work)
+{
+ struct abx500_chargalg *di = container_of(work,
+ struct abx500_chargalg, chargalg_work);
+
+ abx500_chargalg_algorithm(di);
+}
+
+/**
+ * abx500_chargalg_get_property() - get the chargalg properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * chargalg properties by reading the sysfs files.
+ * status: charging/discharging/full/unknown
+ * health: health of the battery
+ * Returns error code in case of failure else 0 on success
+ */
+static int abx500_chargalg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct abx500_chargalg *di;
+
+ di = to_abx500_chargalg_device_info(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = di->charge_status;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->events.batt_ovv) {
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else if (di->events.btemp_underover) {
+ if (di->batt_data.temp <= di->bat->temp_under)
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ } else {
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Exposure to the sysfs interface */
+
+/**
+ * abx500_chargalg_sysfs_charger() - sysfs store operations
+ * @kobj: pointer to the struct kobject
+ * @attr: pointer to the struct attribute
+ * @buf: buffer that holds the parameter passed from userspace
+ * @length: length of the parameter passed
+ *
+ * Returns the length of the buffer (input taken from user space) on success,
+ * else an error code on failure.
+ * This is the operation performed when parameters are passed from user space.
+ */
+static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t length)
+{
+ struct abx500_chargalg *di = container_of(kobj,
+ struct abx500_chargalg, chargalg_kobject);
+ long int param;
+ int ac_usb;
+ int ret;
+ char entry = *attr->name;
+
+ switch (entry) {
+ case 'c':
+ ret = strict_strtol(buf, 10, &param);
+ if (ret < 0)
+ return ret;
+
+ ac_usb = param;
+ switch (ac_usb) {
+ case 0:
+ /* Disable charging */
+ di->susp_status.ac_suspended = true;
+ di->susp_status.usb_suspended = true;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ case 1:
+ /* Enable AC Charging */
+ di->susp_status.ac_suspended = false;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ case 2:
+ /* Enable USB charging */
+ di->susp_status.usb_suspended = false;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ default:
+ dev_info(di->dev, "Wrong input\n"
+ "Enter 0. Disable AC/USB Charging\n"
+ "1. Enable AC charging\n"
+ "2. Enable USB Charging\n");
+ }
+ break;
+ }
+ return strlen(buf);
+}
+
+static struct attribute abx500_chargalg_en_charger = {
+ .name = "chargalg",
+ .mode = S_IWUGO,
+};
+
+static struct attribute *abx500_chargalg_chg[] = {
+ &abx500_chargalg_en_charger,
+ NULL
+};
+
+static const struct sysfs_ops abx500_chargalg_sysfs_ops = {
+ .store = abx500_chargalg_sysfs_charger,
+};
+
+static struct kobj_type abx500_chargalg_ktype = {
+ .sysfs_ops = &abx500_chargalg_sysfs_ops,
+ .default_attrs = abx500_chargalg_chg,
+};
+
+/**
+ * abx500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * @di: pointer to the struct abx500_chargalg
+ *
+ * This function removes the entry in sysfs.
+ */
+static void abx500_chargalg_sysfs_exit(struct abx500_chargalg *di)
+{
+ kobject_del(&di->chargalg_kobject);
+}
+
+/**
+ * abx500_chargalg_sysfs_init() - init of sysfs entry
+ * @di: pointer to the struct abx500_chargalg
+ *
+ * This function adds an entry in sysfs.
+ * Returns an error code in case of failure, else 0 on success
+ */
+static int abx500_chargalg_sysfs_init(struct abx500_chargalg *di)
+{
+ int ret = 0;
+
+ ret = kobject_init_and_add(&di->chargalg_kobject,
+ &abx500_chargalg_ktype,
+ NULL, "abx500_chargalg");
+ if (ret < 0)
+ dev_err(di->dev, "failed to create sysfs entry\n");
+
+ return ret;
+}
+/* Exposure to the sysfs interface <<END>> */
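/*
 * Editor's example (an assumption, not part of the patch): since the
 * kobject above is registered with a NULL parent, the attribute should
 * show up as /sys/abx500_chargalg/chargalg. A minimal user-space sketch
 * that re-enables AC charging could look like this:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/abx500_chargalg/chargalg", "w");

	if (!f) {
		perror("chargalg attribute");
		return 1;
	}
	/* 0: disable charging, 1: enable AC charging, 2: enable USB charging */
	fputs("1", f);
	fclose(f);
	return 0;
}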
+
+#if defined(CONFIG_PM)
+static int abx500_chargalg_resume(struct platform_device *pdev)
+{
+ struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+ /* Kick charger watchdog if charging (any charger online) */
+ if (di->chg_info.online_chg)
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
+
+ /*
+ * Run the charging algorithm right away so that it is not
+ * run too seldom after a resume
+ */
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+ return 0;
+}
+
+static int abx500_chargalg_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+ if (di->chg_info.online_chg)
+ cancel_delayed_work_sync(&di->chargalg_wd_work);
+
+ cancel_delayed_work_sync(&di->chargalg_periodic_work);
+
+ return 0;
+}
+#else
+#define abx500_chargalg_suspend NULL
+#define abx500_chargalg_resume NULL
+#endif
+
+static int __devexit abx500_chargalg_remove(struct platform_device *pdev)
+{
+ struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+ /* sysfs interface to enable/disable charging from user space */
+ abx500_chargalg_sysfs_exit(di);
+
+ /* Delete the work queue */
+ destroy_workqueue(di->chargalg_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->chargalg_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit abx500_chargalg_probe(struct platform_device *pdev)
+{
+ struct abx500_bm_plat_data *plat_data;
+ int ret = 0;
+
+ struct abx500_chargalg *di =
+ kzalloc(sizeof(struct abx500_chargalg), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get device struct */
+ di->dev = &pdev->dev;
+
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->chargalg;
+ di->bat = plat_data->battery;
+
+ /* chargalg supply */
+ di->chargalg_psy.name = "abx500_chargalg";
+ di->chargalg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->chargalg_psy.properties = abx500_chargalg_props;
+ di->chargalg_psy.num_properties = ARRAY_SIZE(abx500_chargalg_props);
+ di->chargalg_psy.get_property = abx500_chargalg_get_property;
+ di->chargalg_psy.supplied_to = di->pdata->supplied_to;
+ di->chargalg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->chargalg_psy.external_power_changed =
+ abx500_chargalg_external_power_changed;
+
+ /* Initialize safety timer */
+ init_timer(&di->safety_timer);
+ di->safety_timer.function = abx500_chargalg_safety_timer_expired;
+ di->safety_timer.data = (unsigned long) di;
+
+ /* Initialize maintenance timer */
+ init_timer(&di->maintenance_timer);
+ di->maintenance_timer.function =
+ abx500_chargalg_maintenance_timer_expired;
+ di->maintenance_timer.data = (unsigned long) di;
+
+ /* Create a work queue for the chargalg */
+ di->chargalg_wq =
+ create_singlethread_workqueue("abx500_chargalg_wq");
+ if (di->chargalg_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init periodic and watchdog work for chargalg */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_periodic_work,
+ abx500_chargalg_periodic_work);
+ INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_wd_work,
+ abx500_chargalg_wd_work);
+
+ /* Init work for running the chargalg instantly */
+ INIT_WORK(&di->chargalg_work, abx500_chargalg_work);
+
+ /* To detect charger at startup */
+ di->chg_info.prev_conn_chg = -1;
+
+ /* Register chargalg power supply class */
+ ret = power_supply_register(di->dev, &di->chargalg_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register chargalg psy\n");
+ goto free_chargalg_wq;
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ /* sysfs interface to enable/disable charging from user space */
+ ret = abx500_chargalg_sysfs_init(di);
+ if (ret) {
+ dev_err(di->dev, "failed to create sysfs entry\n");
+ goto free_psy;
+ }
+
+ /* Run the charging algorithm */
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+ dev_info(di->dev, "probe success\n");
+ return ret;
+
+free_psy:
+ power_supply_unregister(&di->chargalg_psy);
+free_chargalg_wq:
+ destroy_workqueue(di->chargalg_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver abx500_chargalg_driver = {
+ .probe = abx500_chargalg_probe,
+ .remove = __devexit_p(abx500_chargalg_remove),
+ .suspend = abx500_chargalg_suspend,
+ .resume = abx500_chargalg_resume,
+ .driver = {
+ .name = "abx500-chargalg",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init abx500_chargalg_init(void)
+{
+ return platform_driver_register(&abx500_chargalg_driver);
+}
+
+static void __exit abx500_chargalg_exit(void)
+{
+ platform_driver_unregister(&abx500_chargalg_driver);
+}
+
+module_init(abx500_chargalg_init);
+module_exit(abx500_chargalg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:abx500-chargalg");
+MODULE_DESCRIPTION("abx500 battery charging algorithm");
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 88fd9710bda2..9eca9f1ff0ea 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -134,12 +134,11 @@ static int get_batt_uV(struct charger_manager *cm, int *uV)
union power_supply_propval val;
int ret;
- if (cm->fuel_gauge)
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
- POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
- else
+ if (!cm->fuel_gauge)
return -ENODEV;
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
if (ret)
return ret;
@@ -245,9 +244,7 @@ static int try_charger_enable(struct charger_manager *cm, bool enable)
struct charger_desc *desc = cm->desc;
/* Ignore if it's redundent command */
- if (enable && cm->charger_enabled)
- return 0;
- if (!enable && !cm->charger_enabled)
+ if (enable == cm->charger_enabled)
return 0;
if (enable) {
@@ -309,9 +306,7 @@ static void uevent_notify(struct charger_manager *cm, const char *event)
if (!strncmp(env_str_save, event, UEVENT_BUF_SIZE))
return; /* Duplicated. */
- else
- strncpy(env_str_save, event, UEVENT_BUF_SIZE);
-
+ strncpy(env_str_save, event, UEVENT_BUF_SIZE);
return;
}
@@ -387,8 +382,10 @@ static bool cm_monitor(void)
mutex_lock(&cm_list_mtx);
- list_for_each_entry(cm, &cm_list, entry)
- stop = stop || _cm_monitor(cm);
+ list_for_each_entry(cm, &cm_list, entry) {
+ if (_cm_monitor(cm))
+ stop = true;
+ }
mutex_unlock(&cm_list_mtx);
@@ -402,7 +399,8 @@ static int charger_get_property(struct power_supply *psy,
struct charger_manager *cm = container_of(psy,
struct charger_manager, charger_psy);
struct charger_desc *desc = cm->desc;
- int i, ret = 0, uV;
+ int ret = 0;
+ int uV;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -428,8 +426,7 @@ static int charger_get_property(struct power_supply *psy,
val->intval = 0;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = get_batt_uV(cm, &i);
- val->intval = i;
+ ret = get_batt_uV(cm, &val->intval);
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
@@ -697,8 +694,10 @@ bool cm_suspend_again(void)
mutex_lock(&cm_list_mtx);
list_for_each_entry(cm, &cm_list, entry) {
if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
- cm->status_save_batt != is_batt_present(cm))
+ cm->status_save_batt != is_batt_present(cm)) {
ret = false;
+ break;
+ }
}
mutex_unlock(&cm_list_mtx);
@@ -855,11 +854,10 @@ static int charger_manager_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cm);
- memcpy(&cm->charger_psy, &psy_default,
- sizeof(psy_default));
+ memcpy(&cm->charger_psy, &psy_default, sizeof(psy_default));
+
if (!desc->psy_name) {
- strncpy(cm->psy_name_buf, psy_default.name,
- PSY_NAME_MAX);
+ strncpy(cm->psy_name_buf, psy_default.name, PSY_NAME_MAX);
} else {
strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX);
}
@@ -894,15 +892,15 @@ static int charger_manager_probe(struct platform_device *pdev)
POWER_SUPPLY_PROP_CURRENT_NOW;
cm->charger_psy.num_properties++;
}
- if (!desc->measure_battery_temp) {
- cm->charger_psy.properties[cm->charger_psy.num_properties] =
- POWER_SUPPLY_PROP_TEMP_AMBIENT;
- cm->charger_psy.num_properties++;
- }
+
if (desc->measure_battery_temp) {
cm->charger_psy.properties[cm->charger_psy.num_properties] =
POWER_SUPPLY_PROP_TEMP;
cm->charger_psy.num_properties++;
+ } else {
+ cm->charger_psy.properties[cm->charger_psy.num_properties] =
+ POWER_SUPPLY_PROP_TEMP_AMBIENT;
+ cm->charger_psy.num_properties++;
}
ret = power_supply_register(NULL, &cm->charger_psy);
@@ -933,9 +931,8 @@ static int charger_manager_probe(struct platform_device *pdev)
return 0;
err_chg_enable:
- if (desc->charger_regulators)
- regulator_bulk_free(desc->num_charger_regulators,
- desc->charger_regulators);
+ regulator_bulk_free(desc->num_charger_regulators,
+ desc->charger_regulators);
err_bulk_get:
power_supply_unregister(&cm->charger_psy);
err_register:
@@ -961,10 +958,8 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
list_del(&cm->entry);
mutex_unlock(&cm_list_mtx);
- if (desc->charger_regulators)
- regulator_bulk_free(desc->num_charger_regulators,
- desc->charger_regulators);
-
+ regulator_bulk_free(desc->num_charger_regulators,
+ desc->charger_regulators);
power_supply_unregister(&cm->charger_psy);
kfree(cm->charger_psy.properties);
kfree(cm->charger_stat);
@@ -982,9 +977,7 @@ MODULE_DEVICE_TABLE(platform, charger_manager_id);
static int cm_suspend_prepare(struct device *dev)
{
- struct platform_device *pdev = container_of(dev, struct platform_device,
- dev);
- struct charger_manager *cm = platform_get_drvdata(pdev);
+ struct charger_manager *cm = dev_get_drvdata(dev);
if (!cm_suspended) {
if (rtc_dev) {
@@ -1020,9 +1013,7 @@ static int cm_suspend_prepare(struct device *dev)
static void cm_suspend_complete(struct device *dev)
{
- struct platform_device *pdev = container_of(dev, struct platform_device,
- dev);
- struct charger_manager *cm = platform_get_drvdata(pdev);
+ struct charger_manager *cm = dev_get_drvdata(dev);
if (cm_suspended) {
if (rtc_dev) {
diff --git a/drivers/power/da9052-battery.c b/drivers/power/da9052-battery.c
index e8ea47a53dee..a5f6a0ec1572 100644
--- a/drivers/power/da9052-battery.c
+++ b/drivers/power/da9052-battery.c
@@ -612,6 +612,7 @@ static s32 __devinit da9052_bat_probe(struct platform_device *pdev)
if (ret)
goto err;
+ platform_set_drvdata(pdev, bat);
return 0;
err:
@@ -633,6 +634,7 @@ static int __devexit da9052_bat_remove(struct platform_device *pdev)
free_irq(bat->da9052->irq_base + irq, bat);
}
power_supply_unregister(&bat->psy);
+ kfree(bat);
return 0;
}
@@ -645,18 +647,7 @@ static struct platform_driver da9052_bat_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init da9052_bat_init(void)
-{
- return platform_driver_register(&da9052_bat_driver);
-}
-module_init(da9052_bat_init);
-
-static void __exit da9052_bat_exit(void)
-{
- platform_driver_unregister(&da9052_bat_driver);
-}
-module_exit(da9052_bat_exit);
+module_platform_driver(da9052_bat_driver);
MODULE_DESCRIPTION("DA9052 BAT Device Driver");
MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index bfbce5de49da..6bb6e2f5ea81 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -403,18 +403,7 @@ static struct i2c_driver ds278x_battery_driver = {
.remove = ds278x_battery_remove,
.id_table = ds278x_id,
};
-
-static int __init ds278x_init(void)
-{
- return i2c_add_driver(&ds278x_battery_driver);
-}
-module_init(ds278x_init);
-
-static void __exit ds278x_exit(void)
-{
- i2c_del_driver(&ds278x_battery_driver);
-}
-module_exit(ds278x_exit);
+module_i2c_driver(ds278x_battery_driver);
MODULE_AUTHOR("Ryan Mallon");
MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauage IC driver");
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 1289a5f790a1..39eb50f35f09 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -480,6 +480,7 @@ fail0:
dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
+ isp1704_charger_set_power(isp, 0);
return ret;
}
diff --git a/drivers/power/lp8727_charger.c b/drivers/power/lp8727_charger.c
index c53dd1292f81..d8b75780bfef 100644
--- a/drivers/power/lp8727_charger.c
+++ b/drivers/power/lp8727_charger.c
@@ -1,6 +1,7 @@
/*
- * Driver for LP8727 Micro/Mini USB IC with intergrated charger
+ * Driver for LP8727 Micro/Mini USB IC with integrated charger
*
+ * Copyright (C) 2011 Texas Instruments
* Copyright (C) 2011 National Semiconductor
*
* This program is free software; you can redistribute it and/or modify
@@ -25,7 +26,7 @@
#define INT1 0x4
#define INT2 0x5
#define STATUS1 0x6
-#define STATUS2 0x7
+#define STATUS2 0x7
#define CHGCTRL2 0x9
/* CTRL1 register */
@@ -91,7 +92,7 @@ struct lp8727_chg {
enum lp8727_dev_id devid;
};
-static int lp8727_i2c_read(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
+static int lp8727_read_bytes(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
{
s32 ret;
@@ -102,29 +103,22 @@ static int lp8727_i2c_read(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
return (ret != len) ? -EIO : 0;
}
-static int lp8727_i2c_write(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
+static inline int lp8727_read_byte(struct lp8727_chg *pchg, u8 reg, u8 *data)
{
- s32 ret;
+ return lp8727_read_bytes(pchg, reg, data, 1);
+}
+
+static int lp8727_write_byte(struct lp8727_chg *pchg, u8 reg, u8 data)
+{
+ int ret;
mutex_lock(&pchg->xfer_lock);
- ret = i2c_smbus_write_i2c_block_data(pchg->client, reg, len, data);
+ ret = i2c_smbus_write_byte_data(pchg->client, reg, data);
mutex_unlock(&pchg->xfer_lock);
return ret;
}
-static inline int lp8727_i2c_read_byte(struct lp8727_chg *pchg, u8 reg,
- u8 *data)
-{
- return lp8727_i2c_read(pchg, reg, data, 1);
-}
-
-static inline int lp8727_i2c_write_byte(struct lp8727_chg *pchg, u8 reg,
- u8 *data)
-{
- return lp8727_i2c_write(pchg, reg, data, 1);
-}
-
static int lp8727_is_charger_attached(const char *name, int id)
{
if (name) {
@@ -137,37 +131,41 @@ static int lp8727_is_charger_attached(const char *name, int id)
return (id >= ID_TA && id <= ID_USB_CHG) ? 1 : 0;
}
-static void lp8727_init_device(struct lp8727_chg *pchg)
+static int lp8727_init_device(struct lp8727_chg *pchg)
{
u8 val;
+ int ret;
val = ID200_EN | ADC_EN | CP_EN;
- if (lp8727_i2c_write_byte(pchg, CTRL1, &val))
- dev_err(pchg->dev, "i2c write err : addr=0x%.2x\n", CTRL1);
+ ret = lp8727_write_byte(pchg, CTRL1, val);
+ if (ret)
+ return ret;
val = INT_EN | CHGDET_EN;
- if (lp8727_i2c_write_byte(pchg, CTRL2, &val))
- dev_err(pchg->dev, "i2c write err : addr=0x%.2x\n", CTRL2);
+ ret = lp8727_write_byte(pchg, CTRL2, val);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int lp8727_is_dedicated_charger(struct lp8727_chg *pchg)
{
u8 val;
- lp8727_i2c_read_byte(pchg, STATUS1, &val);
- return (val & DCPORT);
+ lp8727_read_byte(pchg, STATUS1, &val);
+ return val & DCPORT;
}
static int lp8727_is_usb_charger(struct lp8727_chg *pchg)
{
u8 val;
- lp8727_i2c_read_byte(pchg, STATUS1, &val);
- return (val & CHPORT);
+ lp8727_read_byte(pchg, STATUS1, &val);
+ return val & CHPORT;
}
static void lp8727_ctrl_switch(struct lp8727_chg *pchg, u8 sw)
{
- u8 val = sw;
- lp8727_i2c_write_byte(pchg, SWCTRL, &val);
+ lp8727_write_byte(pchg, SWCTRL, sw);
}
static void lp8727_id_detection(struct lp8727_chg *pchg, u8 id, int vbusin)
@@ -207,9 +205,9 @@ static void lp8727_enable_chgdet(struct lp8727_chg *pchg)
{
u8 val;
- lp8727_i2c_read_byte(pchg, CTRL2, &val);
+ lp8727_read_byte(pchg, CTRL2, &val);
val |= CHGDET_EN;
- lp8727_i2c_write_byte(pchg, CTRL2, &val);
+ lp8727_write_byte(pchg, CTRL2, val);
}
static void lp8727_delayed_func(struct work_struct *_work)
@@ -218,7 +216,7 @@ static void lp8727_delayed_func(struct work_struct *_work)
struct lp8727_chg *pchg =
container_of(_work, struct lp8727_chg, work.work);
- if (lp8727_i2c_read(pchg, INT1, intstat, 2)) {
+ if (lp8727_read_bytes(pchg, INT1, intstat, 2)) {
dev_err(pchg->dev, "can not read INT registers\n");
return;
}
@@ -244,20 +242,22 @@ static irqreturn_t lp8727_isr_func(int irq, void *ptr)
return IRQ_HANDLED;
}
-static void lp8727_intr_config(struct lp8727_chg *pchg)
+static int lp8727_intr_config(struct lp8727_chg *pchg)
{
INIT_DELAYED_WORK(&pchg->work, lp8727_delayed_func);
pchg->irqthread = create_singlethread_workqueue("lp8727-irqthd");
- if (!pchg->irqthread)
+ if (!pchg->irqthread) {
dev_err(pchg->dev, "can not create thread for lp8727\n");
-
- if (request_threaded_irq(pchg->client->irq,
- NULL,
- lp8727_isr_func,
- IRQF_TRIGGER_FALLING, "lp8727_irq", pchg)) {
- dev_err(pchg->dev, "lp8727 irq can not be registered\n");
+ return -ENOMEM;
}
+
+ return request_threaded_irq(pchg->client->irq,
+ NULL,
+ lp8727_isr_func,
+ IRQF_TRIGGER_FALLING,
+ "lp8727_irq",
+ pchg);
}
static enum power_supply_property lp8727_charger_prop[] = {
@@ -300,7 +300,7 @@ static int lp8727_battery_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
if (lp8727_is_charger_attached(psy->name, pchg->devid)) {
- lp8727_i2c_read_byte(pchg, STATUS1, &read);
+ lp8727_read_byte(pchg, STATUS1, &read);
if (((read & CHGSTAT) >> 4) == EOC)
val->intval = POWER_SUPPLY_STATUS_FULL;
else
@@ -310,7 +310,7 @@ static int lp8727_battery_get_property(struct power_supply *psy,
}
break;
case POWER_SUPPLY_PROP_HEALTH:
- lp8727_i2c_read_byte(pchg, STATUS2, &read);
+ lp8727_read_byte(pchg, STATUS2, &read);
read = (read & TEMP_STAT) >> 5;
if (read >= 0x1 && read <= 0x3)
val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
@@ -351,7 +351,7 @@ static void lp8727_charger_changed(struct power_supply *psy)
eoc_level = pchg->chg_parm->eoc_level;
ichg = pchg->chg_parm->ichg;
val = (ichg << 4) | eoc_level;
- lp8727_i2c_write_byte(pchg, CHGCTRL2, &val);
+ lp8727_write_byte(pchg, CHGCTRL2, val);
}
}
}
@@ -439,15 +439,29 @@ static int lp8727_probe(struct i2c_client *cl, const struct i2c_device_id *id)
mutex_init(&pchg->xfer_lock);
- lp8727_init_device(pchg);
- lp8727_intr_config(pchg);
+ ret = lp8727_init_device(pchg);
+ if (ret) {
+ dev_err(pchg->dev, "i2c communication err: %d", ret);
+ goto error;
+ }
+
+ ret = lp8727_intr_config(pchg);
+ if (ret) {
+ dev_err(pchg->dev, "irq handler err: %d", ret);
+ goto error;
+ }
ret = lp8727_register_psy(pchg);
- if (ret)
- dev_err(pchg->dev,
- "can not register power supplies. err=%d", ret);
+ if (ret) {
+ dev_err(pchg->dev, "power supplies register err: %d", ret);
+ goto error;
+ }
return 0;
+
+error:
+ kfree(pchg);
+ return ret;
}
static int __devexit lp8727_remove(struct i2c_client *cl)
@@ -466,6 +480,7 @@ static const struct i2c_device_id lp8727_ids[] = {
{"lp8727", 0},
{ }
};
+MODULE_DEVICE_TABLE(i2c, lp8727_ids);
static struct i2c_driver lp8727_driver = {
.driver = {
@@ -475,21 +490,9 @@ static struct i2c_driver lp8727_driver = {
.remove = __devexit_p(lp8727_remove),
.id_table = lp8727_ids,
};
+module_i2c_driver(lp8727_driver);
-static int __init lp8727_init(void)
-{
- return i2c_add_driver(&lp8727_driver);
-}
-
-static void __exit lp8727_exit(void)
-{
- i2c_del_driver(&lp8727_driver);
-}
-
-module_init(lp8727_init);
-module_exit(lp8727_exit);
-
-MODULE_DESCRIPTION("National Semiconductor LP8727 charger driver");
-MODULE_AUTHOR
- ("Woogyom Kim <milo.kim@ti.com>, Daniel Jeong <daniel.jeong@ti.com>");
+MODULE_DESCRIPTION("TI/National Semiconductor LP8727 charger driver");
+MODULE_AUTHOR("Woogyom Kim <milo.kim@ti.com>, "
+ "Daniel Jeong <daniel.jeong@ti.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index 2f2f9a6f54fa..c284143cfcd7 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -290,18 +290,7 @@ static struct i2c_driver max17040_i2c_driver = {
.resume = max17040_resume,
.id_table = max17040_id,
};
-
-static int __init max17040_init(void)
-{
- return i2c_add_driver(&max17040_i2c_driver);
-}
-module_init(max17040_init);
-
-static void __exit max17040_exit(void)
-{
- i2c_del_driver(&max17040_i2c_driver);
-}
-module_exit(max17040_exit);
+module_i2c_driver(max17040_i2c_driver);
MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
MODULE_DESCRIPTION("MAX17040 Fuel Gauge");
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 86acee2f9889..04620c2cb388 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -26,14 +26,47 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/power_supply.h>
#include <linux/power/max17042_battery.h>
+#include <linux/of.h>
+
+/* Status register bits */
+#define STATUS_POR_BIT (1 << 1)
+#define STATUS_BST_BIT (1 << 3)
+#define STATUS_VMN_BIT (1 << 8)
+#define STATUS_TMN_BIT (1 << 9)
+#define STATUS_SMN_BIT (1 << 10)
+#define STATUS_BI_BIT (1 << 11)
+#define STATUS_VMX_BIT (1 << 12)
+#define STATUS_TMX_BIT (1 << 13)
+#define STATUS_SMX_BIT (1 << 14)
+#define STATUS_BR_BIT (1 << 15)
+
+/* Interrupt mask bits */
+#define CONFIG_ALRT_BIT_ENBL (1 << 2)
+#define STATUS_INTR_SOCMIN_BIT (1 << 10)
+#define STATUS_INTR_SOCMAX_BIT (1 << 14)
+
+#define VFSOC0_LOCK 0x0000
+#define VFSOC0_UNLOCK 0x0080
+#define MODEL_UNLOCK1 0X0059
+#define MODEL_UNLOCK2 0X00C4
+#define MODEL_LOCK1 0X0000
+#define MODEL_LOCK2 0X0000
+
+#define dQ_ACC_DIV 0x4
+#define dP_ACC_100 0x1900
+#define dP_ACC_200 0x3200
struct max17042_chip {
struct i2c_client *client;
struct power_supply battery;
struct max17042_platform_data *pdata;
+ struct work_struct work;
+ int init_complete;
};
static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value)
@@ -87,6 +120,9 @@ static int max17042_get_property(struct power_supply *psy,
struct max17042_chip, battery);
int ret;
+ if (!chip->init_complete)
+ return -EAGAIN;
+
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
ret = max17042_read_reg(chip->client, MAX17042_STATUS);
@@ -136,21 +172,18 @@ static int max17042_get_property(struct power_supply *psy,
val->intval = ret * 625 / 8;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- ret = max17042_read_reg(chip->client, MAX17042_SOC);
+ ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
if (ret < 0)
return ret;
val->intval = ret >> 8;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
- ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
+ ret = max17042_read_reg(chip->client, MAX17042_FullCAP);
if (ret < 0)
return ret;
- if ((ret >> 8) >= MAX17042_BATTERY_FULL)
- val->intval = 1;
- else if (ret >= 0)
- val->intval = 0;
+ val->intval = ret * 1000 / 2;
break;
case POWER_SUPPLY_PROP_TEMP:
ret = max17042_read_reg(chip->client, MAX17042_TEMP);
@@ -210,22 +243,419 @@ static int max17042_get_property(struct power_supply *psy,
return 0;
}
+static int max17042_write_verify_reg(struct i2c_client *client,
+ u8 reg, u16 value)
+{
+ int retries = 8;
+ int ret;
+ u16 read_value;
+
+ do {
+ ret = i2c_smbus_write_word_data(client, reg, value);
+ read_value = max17042_read_reg(client, reg);
+ if (read_value != value) {
+ ret = -EIO;
+ retries--;
+ }
+ } while (retries && read_value != value);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
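
A minimal usage sketch of the write-and-verify helper above; the register and value here are made up for illustration (real callers appear below in max17042_write_custom_regs()):

	/* Program RCOMP0 and give up after the helper's eight attempts */
	ret = max17042_write_verify_reg(chip->client, MAX17042_RCOMP0, 0x0043);
	if (ret < 0)
		return ret;	/* I2C failure or readback never matched */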
+
+static inline void max17042_override_por(
+ struct i2c_client *client, u8 reg, u16 value)
+{
+ if (value)
+ max17042_write_reg(client, reg, value);
+}
+
+static inline void max10742_unlock_model(struct max17042_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_UNLOCK1);
+ max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_UNLOCK2);
+}
+
+static inline void max10742_lock_model(struct max17042_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_LOCK1);
+ max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_LOCK2);
+}
+
+static inline void max17042_write_model_data(struct max17042_chip *chip,
+ u8 addr, int size)
+{
+ struct i2c_client *client = chip->client;
+ int i;
+ for (i = 0; i < size; i++)
+ max17042_write_reg(client, addr + i,
+ chip->pdata->config_data->cell_char_tbl[i]);
+}
+
+static inline void max17042_read_model_data(struct max17042_chip *chip,
+ u8 addr, u16 *data, int size)
+{
+ struct i2c_client *client = chip->client;
+ int i;
+
+ for (i = 0; i < size; i++)
+ data[i] = max17042_read_reg(client, addr + i);
+}
+
+static inline int max17042_model_data_compare(struct max17042_chip *chip,
+ u16 *data1, u16 *data2, int size)
+{
+ int i;
+
+ if (memcmp(data1, data2, size)) {
+ dev_err(&chip->client->dev, "%s compare failed\n", __func__);
+ for (i = 0; i < size; i++)
+ dev_info(&chip->client->dev, "0x%x, 0x%x",
+ data1[i], data2[i]);
+ dev_info(&chip->client->dev, "\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int max17042_init_model(struct max17042_chip *chip)
+{
+ int ret;
+ int table_size =
+ sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
+ u16 *temp_data;
+
+ temp_data = kzalloc(table_size, GFP_KERNEL);
+ if (!temp_data)
+ return -ENOMEM;
+
+ max10742_unlock_model(chip);
+ max17042_write_model_data(chip, MAX17042_MODELChrTbl,
+ table_size);
+ max17042_read_model_data(chip, MAX17042_MODELChrTbl, temp_data,
+ table_size);
+
+ ret = max17042_model_data_compare(
+ chip,
+ chip->pdata->config_data->cell_char_tbl,
+ temp_data,
+ table_size);
+
+ max10742_lock_model(chip);
+ kfree(temp_data);
+
+ return ret;
+}
+
+static int max17042_verify_model_lock(struct max17042_chip *chip)
+{
+ int i;
+ int table_size =
+ sizeof(chip->pdata->config_data->cell_char_tbl);
+ u16 *temp_data;
+ int ret = 0;
+
+ temp_data = kzalloc(table_size, GFP_KERNEL);
+ if (!temp_data)
+ return -ENOMEM;
+
+ max17042_read_model_data(chip, MAX17042_MODELChrTbl, temp_data,
+ table_size);
+ for (i = 0; i < table_size; i++)
+ if (temp_data[i])
+ ret = -EINVAL;
+
+ kfree(temp_data);
+ return ret;
+}
+
+static void max17042_write_config_regs(struct max17042_chip *chip)
+{
+ struct max17042_config_data *config = chip->pdata->config_data;
+
+ max17042_write_reg(chip->client, MAX17042_CONFIG, config->config);
+ max17042_write_reg(chip->client, MAX17042_LearnCFG, config->learn_cfg);
+ max17042_write_reg(chip->client, MAX17042_FilterCFG,
+ config->filter_cfg);
+ max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
+}
+
+static void max17042_write_custom_regs(struct max17042_chip *chip)
+{
+ struct max17042_config_data *config = chip->pdata->config_data;
+
+ max17042_write_verify_reg(chip->client, MAX17042_RCOMP0,
+ config->rcomp0);
+ max17042_write_verify_reg(chip->client, MAX17042_TempCo,
+ config->tcompc0);
+ max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
+ config->empty_tempco);
+ max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
+ config->kempty0);
+ max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
+ config->ichgt_term);
+}
+
+static void max17042_update_capacity_regs(struct max17042_chip *chip)
+{
+ struct max17042_config_data *config = chip->pdata->config_data;
+
+ max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
+ config->fullcap);
+ max17042_write_reg(chip->client, MAX17042_DesignCap,
+ config->design_cap);
+ max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
+ config->fullcapnom);
+}
+
+static void max17042_reset_vfsoc0_reg(struct max17042_chip *chip)
+{
+ u16 vfSoc;
+
+ vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
+ max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK);
+ max17042_write_verify_reg(chip->client, MAX17042_VFSOC0, vfSoc);
+ max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_LOCK);
+}
+
+static void max17042_load_new_capacity_params(struct max17042_chip *chip)
+{
+ u16 full_cap0, rep_cap, dq_acc, vfSoc;
+ u32 rem_cap;
+
+ struct max17042_config_data *config = chip->pdata->config_data;
+
+ full_cap0 = max17042_read_reg(chip->client, MAX17042_FullCAP0);
+ vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
+
+	/* vfSoc needs to be shifted by 8 bits to get the
+	 * percentage in 1% accuracy; to get the right rem_cap,
+	 * multiply full_cap0 by vfSoc and divide by 100.
+	 */
+ rem_cap = ((vfSoc >> 8) * full_cap0) / 100;
+ max17042_write_verify_reg(chip->client, MAX17042_RemCap, (u16)rem_cap);
+
+ rep_cap = (u16)rem_cap;
+ max17042_write_verify_reg(chip->client, MAX17042_RepCap, rep_cap);
+
+ /* Write dQ_acc to 200% of Capacity and dP_acc to 200% */
+ dq_acc = config->fullcap / dQ_ACC_DIV;
+ max17042_write_verify_reg(chip->client, MAX17042_dQacc, dq_acc);
+ max17042_write_verify_reg(chip->client, MAX17042_dPacc, dP_ACC_200);
+
+ max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
+ config->fullcap);
+ max17042_write_reg(chip->client, MAX17042_DesignCap,
+ config->design_cap);
+ max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
+ config->fullcapnom);
+}
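
A worked example of the remaining-capacity math above, with made-up register readings:

	/*
	 * Suppose VFSOC reads 0x3C00 (upper byte 0x3C = 60 %) and
	 * FullCAP0 reads 3000.  Then:
	 *	rem_cap = (0x3C00 >> 8) * 3000 / 100 = 60 * 3000 / 100 = 1800
	 * and 1800 is written to both RemCap and RepCap.
	 */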
+
+/*
+ * Block write all the override values coming from platform data.
+ * This function MUST be called before the POR initialization procedure
+ * specified by Maxim.
+ */
+static inline void max17042_override_por_values(struct max17042_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ struct max17042_config_data *config = chip->pdata->config_data;
+
+ max17042_override_por(client, MAX17042_TGAIN, config->tgain);
+ max17042_override_por(client, MAx17042_TOFF, config->toff);
+ max17042_override_por(client, MAX17042_CGAIN, config->cgain);
+ max17042_override_por(client, MAX17042_COFF, config->coff);
+
+ max17042_override_por(client, MAX17042_VALRT_Th, config->valrt_thresh);
+ max17042_override_por(client, MAX17042_TALRT_Th, config->talrt_thresh);
+ max17042_override_por(client, MAX17042_SALRT_Th,
+ config->soc_alrt_thresh);
+ max17042_override_por(client, MAX17042_CONFIG, config->config);
+ max17042_override_por(client, MAX17042_SHDNTIMER, config->shdntimer);
+
+ max17042_override_por(client, MAX17042_DesignCap, config->design_cap);
+ max17042_override_por(client, MAX17042_ICHGTerm, config->ichgt_term);
+
+ max17042_override_por(client, MAX17042_AtRate, config->at_rate);
+ max17042_override_por(client, MAX17042_LearnCFG, config->learn_cfg);
+ max17042_override_por(client, MAX17042_FilterCFG, config->filter_cfg);
+ max17042_override_por(client, MAX17042_RelaxCFG, config->relax_cfg);
+ max17042_override_por(client, MAX17042_MiscCFG, config->misc_cfg);
+ max17042_override_por(client, MAX17042_MaskSOC, config->masksoc);
+
+ max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
+ max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
+ max17042_override_por(client, MAX17042_SOC_empty, config->socempty);
+ max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
+ max17042_override_por(client, MAX17042_dQacc, config->dqacc);
+ max17042_override_por(client, MAX17042_dPacc, config->dpacc);
+
+ max17042_override_por(client, MAX17042_V_empty, config->vempty);
+ max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
+ max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
+ max17042_override_por(client, MAX17042_FCTC, config->fctc);
+ max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
+ max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
+ max17042_override_por(client, MAX17042_EmptyTempCo,
+ config->empty_tempco);
+ max17042_override_por(client, MAX17042_K_empty0, config->kempty0);
+}
+
+static int max17042_init_chip(struct max17042_chip *chip)
+{
+ int ret;
+ int val;
+
+ max17042_override_por_values(chip);
+	/* After power-up, the MAX17042 requires 500 ms in order
+	 * to perform signal debouncing and initial SOC reporting.
+	 */
+ msleep(500);
+
+	/* Initialize configuration */
+ max17042_write_config_regs(chip);
+
+ /* write cell characterization data */
+ ret = max17042_init_model(chip);
+ if (ret) {
+ dev_err(&chip->client->dev, "%s init failed\n",
+ __func__);
+ return -EIO;
+ }
+	ret = max17042_verify_model_lock(chip);
+ if (ret) {
+ dev_err(&chip->client->dev, "%s lock verify failed\n",
+ __func__);
+ return -EIO;
+ }
+ /* write custom parameters */
+ max17042_write_custom_regs(chip);
+
+ /* update capacity params */
+ max17042_update_capacity_regs(chip);
+
+	/* delay must be at least 350 ms to allow VFSOC
+	 * to be calculated from the new configuration
+	 */
+ msleep(350);
+
+ /* reset vfsoc0 reg */
+ max17042_reset_vfsoc0_reg(chip);
+
+ /* load new capacity params */
+ max17042_load_new_capacity_params(chip);
+
+ /* Init complete, Clear the POR bit */
+ val = max17042_read_reg(chip->client, MAX17042_STATUS);
+ max17042_write_reg(chip->client, MAX17042_STATUS,
+ val & (~STATUS_POR_BIT));
+ return 0;
+}
+
+static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off)
+{
+ u16 soc, soc_tr;
+
+	/* program the interrupt thresholds such that we should
+	 * get an interrupt for every 'off' percent change in the SOC
+	 */
+ soc = max17042_read_reg(chip->client, MAX17042_RepSOC) >> 8;
+ soc_tr = (soc + off) << 8;
+ soc_tr |= (soc - off);
+ max17042_write_reg(chip->client, MAX17042_SALRT_Th, soc_tr);
+}
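
For instance (values assumed), with RepSOC reporting 80 % and off = 1, the alert threshold register is packed as follows:

	/*
	 *	soc    = 80			(0x50)
	 *	soc_tr = (80 + 1) << 8 | (80 - 1)
	 *	       = 0x5100 | 0x4F = 0x514F
	 * i.e. the SOC-max alert fires at 81 % and the SOC-min alert at 79 %.
	 */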
+
+static irqreturn_t max17042_thread_handler(int id, void *dev)
+{
+ struct max17042_chip *chip = dev;
+ u16 val;
+
+ val = max17042_read_reg(chip->client, MAX17042_STATUS);
+ if ((val & STATUS_INTR_SOCMIN_BIT) ||
+ (val & STATUS_INTR_SOCMAX_BIT)) {
+ dev_info(&chip->client->dev, "SOC threshold INTR\n");
+ max17042_set_soc_threshold(chip, 1);
+ }
+
+ power_supply_changed(&chip->battery);
+ return IRQ_HANDLED;
+}
+
+static void max17042_init_worker(struct work_struct *work)
+{
+ struct max17042_chip *chip = container_of(work,
+ struct max17042_chip, work);
+ int ret;
+
+ /* Initialize registers according to values from the platform data */
+ if (chip->pdata->enable_por_init && chip->pdata->config_data) {
+ ret = max17042_init_chip(chip);
+ if (ret)
+ return;
+ }
+
+ chip->init_complete = 1;
+}
+
+#ifdef CONFIG_OF
+static struct max17042_platform_data *
+max17042_get_pdata(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ u32 prop;
+ struct max17042_platform_data *pdata;
+
+ if (!np)
+ return dev->platform_data;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ /*
+ * Require current sense resistor value to be specified for
+ * current-sense functionality to be enabled at all.
+ */
+ if (of_property_read_u32(np, "maxim,rsns-microohm", &prop) == 0) {
+ pdata->r_sns = prop;
+ pdata->enable_current_sense = true;
+ }
+
+ return pdata;
+}
+#else
+static struct max17042_platform_data *
+max17042_get_pdata(struct device *dev)
+{
+ return dev->platform_data;
+}
+#endif
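
For reference, a hypothetical device tree node that the CONFIG_OF path above would parse; the compatible string and property name are taken from this patch, while the node name and I2C address are illustrative only:

/*
 *	fuel-gauge@36 {
 *		compatible = "maxim,max17042";
 *		reg = <0x36>;
 *		maxim,rsns-microohm = <10000>;	// enables current sensing
 *	};
 */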
+
static int __devinit max17042_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct max17042_chip *chip;
int ret;
+ int reg;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EIO;
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->client = client;
- chip->pdata = client->dev.platform_data;
+ chip->pdata = max17042_get_pdata(&client->dev);
+ if (!chip->pdata) {
+ dev_err(&client->dev, "no platform data provided\n");
+ return -EINVAL;
+ }
i2c_set_clientdata(client, chip);
@@ -243,17 +673,9 @@ static int __devinit max17042_probe(struct i2c_client *client,
if (chip->pdata->r_sns == 0)
chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
- ret = power_supply_register(&client->dev, &chip->battery);
- if (ret) {
- dev_err(&client->dev, "failed: power supply register\n");
- kfree(chip);
- return ret;
- }
-
- /* Initialize registers according to values from the platform data */
if (chip->pdata->init_data)
max17042_set_reg(client, chip->pdata->init_data,
- chip->pdata->num_init_data);
+ chip->pdata->num_init_data);
if (!chip->pdata->enable_current_sense) {
max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
@@ -261,7 +683,34 @@ static int __devinit max17042_probe(struct i2c_client *client,
max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
}
- return 0;
+ if (client->irq) {
+ ret = request_threaded_irq(client->irq, NULL,
+ max17042_thread_handler,
+ IRQF_TRIGGER_FALLING,
+ chip->battery.name, chip);
+ if (!ret) {
+ reg = max17042_read_reg(client, MAX17042_CONFIG);
+ reg |= CONFIG_ALRT_BIT_ENBL;
+ max17042_write_reg(client, MAX17042_CONFIG, reg);
+ max17042_set_soc_threshold(chip, 1);
+ } else
+ dev_err(&client->dev, "%s(): cannot get IRQ\n",
+ __func__);
+ }
+
+ reg = max17042_read_reg(chip->client, MAX17042_STATUS);
+
+ if (reg & STATUS_POR_BIT) {
+ INIT_WORK(&chip->work, max17042_init_worker);
+ schedule_work(&chip->work);
+ } else {
+ chip->init_complete = 1;
+ }
+
+ ret = power_supply_register(&client->dev, &chip->battery);
+ if (ret)
+ dev_err(&client->dev, "failed: power supply register\n");
+ return ret;
}
static int __devexit max17042_remove(struct i2c_client *client)
@@ -269,10 +718,17 @@ static int __devexit max17042_remove(struct i2c_client *client)
struct max17042_chip *chip = i2c_get_clientdata(client);
power_supply_unregister(&chip->battery);
- kfree(chip);
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id max17042_dt_match[] = {
+ { .compatible = "maxim,max17042" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, max17042_dt_match);
+#endif
+
static const struct i2c_device_id max17042_id[] = {
{ "max17042", 0 },
{ }
@@ -282,23 +738,13 @@ MODULE_DEVICE_TABLE(i2c, max17042_id);
static struct i2c_driver max17042_i2c_driver = {
.driver = {
.name = "max17042",
+ .of_match_table = of_match_ptr(max17042_dt_match),
},
.probe = max17042_probe,
.remove = __devexit_p(max17042_remove),
.id_table = max17042_id,
};
-
-static int __init max17042_init(void)
-{
- return i2c_add_driver(&max17042_i2c_driver);
-}
-module_init(max17042_init);
-
-static void __exit max17042_exit(void)
-{
- i2c_del_driver(&max17042_i2c_driver);
-}
-module_exit(max17042_exit);
+module_i2c_driver(max17042_i2c_driver);
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("MAX17042 Fuel Gauge");
diff --git a/drivers/power/sbs-battery.c b/drivers/power/sbs-battery.c
index 9ff8af069da6..06b659d91790 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/sbs-battery.c
@@ -852,18 +852,7 @@ static struct i2c_driver sbs_battery_driver = {
.of_match_table = sbs_dt_ids,
},
};
-
-static int __init sbs_battery_init(void)
-{
- return i2c_add_driver(&sbs_battery_driver);
-}
-module_init(sbs_battery_init);
-
-static void __exit sbs_battery_exit(void)
-{
- i2c_del_driver(&sbs_battery_driver);
-}
-module_exit(sbs_battery_exit);
+module_i2c_driver(sbs_battery_driver);
MODULE_DESCRIPTION("SBS battery monitor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/smb347-charger.c b/drivers/power/smb347-charger.c
new file mode 100644
index 000000000000..ce1694d1a365
--- /dev/null
+++ b/drivers/power/smb347-charger.c
@@ -0,0 +1,1294 @@
+/*
+ * Summit Microelectronics SMB347 Battery Charger Driver
+ *
+ * Copyright (C) 2011, Intel Corporation
+ *
+ * Authors: Bruce E. Robertson <bruce.e.robertson@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/power/smb347-charger.h>
+#include <linux/seq_file.h>
+
+/*
+ * Configuration registers. These are mirrored to volatile RAM and can be
+ * written once %CMD_A_ALLOW_WRITE is set in %CMD_A register. They will be
+ * reloaded from non-volatile registers after POR.
+ */
+#define CFG_CHARGE_CURRENT 0x00
+#define CFG_CHARGE_CURRENT_FCC_MASK 0xe0
+#define CFG_CHARGE_CURRENT_FCC_SHIFT 5
+#define CFG_CHARGE_CURRENT_PCC_MASK 0x18
+#define CFG_CHARGE_CURRENT_PCC_SHIFT 3
+#define CFG_CHARGE_CURRENT_TC_MASK 0x07
+#define CFG_CURRENT_LIMIT 0x01
+#define CFG_CURRENT_LIMIT_DC_MASK 0xf0
+#define CFG_CURRENT_LIMIT_DC_SHIFT 4
+#define CFG_CURRENT_LIMIT_USB_MASK 0x0f
+#define CFG_FLOAT_VOLTAGE 0x03
+#define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK 0xc0
+#define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT 6
+#define CFG_STAT 0x05
+#define CFG_STAT_DISABLED BIT(5)
+#define CFG_STAT_ACTIVE_HIGH BIT(7)
+#define CFG_PIN 0x06
+#define CFG_PIN_EN_CTRL_MASK 0x60
+#define CFG_PIN_EN_CTRL_ACTIVE_HIGH 0x40
+#define CFG_PIN_EN_CTRL_ACTIVE_LOW 0x60
+#define CFG_PIN_EN_APSD_IRQ BIT(1)
+#define CFG_PIN_EN_CHARGER_ERROR BIT(2)
+#define CFG_THERM 0x07
+#define CFG_THERM_SOFT_HOT_COMPENSATION_MASK 0x03
+#define CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT 0
+#define CFG_THERM_SOFT_COLD_COMPENSATION_MASK 0x0c
+#define CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT 2
+#define CFG_THERM_MONITOR_DISABLED BIT(4)
+#define CFG_SYSOK 0x08
+#define CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED BIT(2)
+#define CFG_OTHER 0x09
+#define CFG_OTHER_RID_MASK 0xc0
+#define CFG_OTHER_RID_ENABLED_AUTO_OTG 0xc0
+#define CFG_OTG 0x0a
+#define CFG_OTG_TEMP_THRESHOLD_MASK 0x30
+#define CFG_OTG_TEMP_THRESHOLD_SHIFT 4
+#define CFG_OTG_CC_COMPENSATION_MASK 0xc0
+#define CFG_OTG_CC_COMPENSATION_SHIFT 6
+#define CFG_TEMP_LIMIT 0x0b
+#define CFG_TEMP_LIMIT_SOFT_HOT_MASK 0x03
+#define CFG_TEMP_LIMIT_SOFT_HOT_SHIFT 0
+#define CFG_TEMP_LIMIT_SOFT_COLD_MASK 0x0c
+#define CFG_TEMP_LIMIT_SOFT_COLD_SHIFT 2
+#define CFG_TEMP_LIMIT_HARD_HOT_MASK 0x30
+#define CFG_TEMP_LIMIT_HARD_HOT_SHIFT 4
+#define CFG_TEMP_LIMIT_HARD_COLD_MASK 0xc0
+#define CFG_TEMP_LIMIT_HARD_COLD_SHIFT 6
+#define CFG_FAULT_IRQ 0x0c
+#define CFG_FAULT_IRQ_DCIN_UV BIT(2)
+#define CFG_STATUS_IRQ 0x0d
+#define CFG_STATUS_IRQ_TERMINATION_OR_TAPER BIT(4)
+#define CFG_ADDRESS 0x0e
+
+/* Command registers */
+#define CMD_A 0x30
+#define CMD_A_CHG_ENABLED BIT(1)
+#define CMD_A_SUSPEND_ENABLED BIT(2)
+#define CMD_A_ALLOW_WRITE BIT(7)
+#define CMD_B 0x31
+#define CMD_C 0x33
+
+/* Interrupt Status registers */
+#define IRQSTAT_A 0x35
+#define IRQSTAT_C 0x37
+#define IRQSTAT_C_TERMINATION_STAT BIT(0)
+#define IRQSTAT_C_TERMINATION_IRQ BIT(1)
+#define IRQSTAT_C_TAPER_IRQ BIT(3)
+#define IRQSTAT_E 0x39
+#define IRQSTAT_E_USBIN_UV_STAT BIT(0)
+#define IRQSTAT_E_USBIN_UV_IRQ BIT(1)
+#define IRQSTAT_E_DCIN_UV_STAT BIT(4)
+#define IRQSTAT_E_DCIN_UV_IRQ BIT(5)
+#define IRQSTAT_F 0x3a
+
+/* Status registers */
+#define STAT_A 0x3b
+#define STAT_A_FLOAT_VOLTAGE_MASK 0x3f
+#define STAT_B 0x3c
+#define STAT_C 0x3d
+#define STAT_C_CHG_ENABLED BIT(0)
+#define STAT_C_CHG_MASK 0x06
+#define STAT_C_CHG_SHIFT 1
+#define STAT_C_CHARGER_ERROR BIT(6)
+#define STAT_E 0x3f
+
+/**
+ * struct smb347_charger - smb347 charger instance
+ * @lock: protects concurrent access to online variables
+ * @client: pointer to i2c client
+ * @mains: power_supply instance for AC/DC power
+ * @usb: power_supply instance for USB power
+ * @battery: power_supply instance for battery
+ * @mains_online: is AC/DC input connected
+ * @usb_online: is USB input connected
+ * @charging_enabled: is charging enabled
+ * @dentry: for debugfs
+ * @pdata: pointer to platform data
+ */
+struct smb347_charger {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct power_supply mains;
+ struct power_supply usb;
+ struct power_supply battery;
+ bool mains_online;
+ bool usb_online;
+ bool charging_enabled;
+ struct dentry *dentry;
+ const struct smb347_charger_platform_data *pdata;
+};
+
+/* Fast charge current in uA */
+static const unsigned int fcc_tbl[] = {
+ 700000,
+ 900000,
+ 1200000,
+ 1500000,
+ 1800000,
+ 2000000,
+ 2200000,
+ 2500000,
+};
+
+/* Pre-charge current in uA */
+static const unsigned int pcc_tbl[] = {
+ 100000,
+ 150000,
+ 200000,
+ 250000,
+};
+
+/* Termination current in uA */
+static const unsigned int tc_tbl[] = {
+ 37500,
+ 50000,
+ 100000,
+ 150000,
+ 200000,
+ 250000,
+ 500000,
+ 600000,
+};
+
+/* Input current limit in uA */
+static const unsigned int icl_tbl[] = {
+ 300000,
+ 500000,
+ 700000,
+ 900000,
+ 1200000,
+ 1500000,
+ 1800000,
+ 2000000,
+ 2200000,
+ 2500000,
+};
+
+/* Charge current compensation in uA */
+static const unsigned int ccc_tbl[] = {
+ 250000,
+ 700000,
+ 900000,
+ 1200000,
+};
+
+/* Convert register value to current using lookup table */
+static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
+{
+ if (val >= size)
+ return -EINVAL;
+ return tbl[val];
+}
+
+/* Convert current to register value using lookup table */
+static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (val < tbl[i])
+ break;
+ return i > 0 ? i - 1 : -EINVAL;
+}
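
A worked example of the two lookup helpers above, using the fcc_tbl defined earlier (the request value is chosen for illustration):

	/*
	 * current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl), 1200000):
	 *	scan stops at index 3 (1500000 > 1200000), returns 3 - 1 = 2,
	 *	i.e. the largest table entry not above the requested current.
	 * hw_to_current(fcc_tbl, ARRAY_SIZE(fcc_tbl), 2) returns fcc_tbl[2],
	 * which is 1200000 uA again.
	 */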
+
+static int smb347_read(struct smb347_charger *smb, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(smb->client, reg);
+ if (ret < 0)
+ dev_warn(&smb->client->dev, "failed to read reg 0x%x: %d\n",
+ reg, ret);
+ return ret;
+}
+
+static int smb347_write(struct smb347_charger *smb, u8 reg, u8 val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(smb->client, reg, val);
+ if (ret < 0)
+ dev_warn(&smb->client->dev, "failed to write reg 0x%x: %d\n",
+ reg, ret);
+ return ret;
+}
+
+/**
+ * smb347_update_status - updates the charging status
+ * @smb: pointer to smb347 charger instance
+ *
+ * Function checks status of the charging and updates internal state
+ * accordingly. Returns %0 if there is no change in status, %1 if the
+ * status has changed and negative errno in case of failure.
+ */
+static int smb347_update_status(struct smb347_charger *smb)
+{
+ bool usb = false;
+ bool dc = false;
+ int ret;
+
+ ret = smb347_read(smb, IRQSTAT_E);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * dc and usb are set depending on whether they are enabled in the
+	 * platform data _and_ whether the corresponding undervoltage status
+	 * bit is clear.
+ */
+ if (smb->pdata->use_mains)
+ dc = !(ret & IRQSTAT_E_DCIN_UV_STAT);
+ if (smb->pdata->use_usb)
+ usb = !(ret & IRQSTAT_E_USBIN_UV_STAT);
+
+ mutex_lock(&smb->lock);
+ ret = smb->mains_online != dc || smb->usb_online != usb;
+ smb->mains_online = dc;
+ smb->usb_online = usb;
+ mutex_unlock(&smb->lock);
+
+ return ret;
+}
+
+/*
+ * smb347_is_online - returns whether input power source is connected
+ * @smb: pointer to smb347 charger instance
+ *
+ * Returns %true if input power source is connected. Note that this is
+ * dependent on what platform has configured for usable power sources. For
+ * example if USB is disabled, this will return %false even if the USB
+ * cable is connected.
+ */
+static bool smb347_is_online(struct smb347_charger *smb)
+{
+ bool ret;
+
+ mutex_lock(&smb->lock);
+ ret = smb->usb_online || smb->mains_online;
+ mutex_unlock(&smb->lock);
+
+ return ret;
+}
+
+/**
+ * smb347_charging_status - returns status of charging
+ * @smb: pointer to smb347 charger instance
+ *
+ * Function returns charging status. %0 means no charging is in progress,
+ * %1 means pre-charging, %2 fast-charging and %3 taper-charging.
+ */
+static int smb347_charging_status(struct smb347_charger *smb)
+{
+ int ret;
+
+ if (!smb347_is_online(smb))
+ return 0;
+
+ ret = smb347_read(smb, STAT_C);
+ if (ret < 0)
+ return 0;
+
+ return (ret & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
+}
+
+static int smb347_charging_set(struct smb347_charger *smb, bool enable)
+{
+ int ret = 0;
+
+ if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) {
+ dev_dbg(&smb->client->dev,
+ "charging enable/disable in SW disabled\n");
+ return 0;
+ }
+
+ mutex_lock(&smb->lock);
+ if (smb->charging_enabled != enable) {
+ ret = smb347_read(smb, CMD_A);
+ if (ret < 0)
+ goto out;
+
+ smb->charging_enabled = enable;
+
+ if (enable)
+ ret |= CMD_A_CHG_ENABLED;
+ else
+ ret &= ~CMD_A_CHG_ENABLED;
+
+ ret = smb347_write(smb, CMD_A, ret);
+ }
+out:
+ mutex_unlock(&smb->lock);
+ return ret;
+}
+
+static inline int smb347_charging_enable(struct smb347_charger *smb)
+{
+ return smb347_charging_set(smb, true);
+}
+
+static inline int smb347_charging_disable(struct smb347_charger *smb)
+{
+ return smb347_charging_set(smb, false);
+}
+
+static int smb347_update_online(struct smb347_charger *smb)
+{
+ int ret;
+
+ /*
+	 * Depending on whether a valid power source is connected, we enable
+	 * or disable charging. This is done manually because it depends on
+	 * how the platform has configured the valid inputs.
+ */
+ if (smb347_is_online(smb)) {
+ ret = smb347_charging_enable(smb);
+ if (ret < 0)
+ dev_err(&smb->client->dev,
+ "failed to enable charging\n");
+ } else {
+ ret = smb347_charging_disable(smb);
+ if (ret < 0)
+ dev_err(&smb->client->dev,
+ "failed to disable charging\n");
+ }
+
+ return ret;
+}
+
+static int smb347_set_charge_current(struct smb347_charger *smb)
+{
+ int ret, val;
+
+ ret = smb347_read(smb, CFG_CHARGE_CURRENT);
+ if (ret < 0)
+ return ret;
+
+ if (smb->pdata->max_charge_current) {
+ val = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
+ smb->pdata->max_charge_current);
+ if (val < 0)
+ return val;
+
+ ret &= ~CFG_CHARGE_CURRENT_FCC_MASK;
+ ret |= val << CFG_CHARGE_CURRENT_FCC_SHIFT;
+ }
+
+ if (smb->pdata->pre_charge_current) {
+ val = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
+ smb->pdata->pre_charge_current);
+ if (val < 0)
+ return val;
+
+ ret &= ~CFG_CHARGE_CURRENT_PCC_MASK;
+ ret |= val << CFG_CHARGE_CURRENT_PCC_SHIFT;
+ }
+
+ if (smb->pdata->termination_current) {
+ val = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
+ smb->pdata->termination_current);
+ if (val < 0)
+ return val;
+
+ ret &= ~CFG_CHARGE_CURRENT_TC_MASK;
+ ret |= val;
+ }
+
+ return smb347_write(smb, CFG_CHARGE_CURRENT, ret);
+}
+
+static int smb347_set_current_limits(struct smb347_charger *smb)
+{
+ int ret, val;
+
+ ret = smb347_read(smb, CFG_CURRENT_LIMIT);
+ if (ret < 0)
+ return ret;
+
+ if (smb->pdata->mains_current_limit) {
+ val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+ smb->pdata->mains_current_limit);
+ if (val < 0)
+ return val;
+
+ ret &= ~CFG_CURRENT_LIMIT_DC_MASK;
+ ret |= val << CFG_CURRENT_LIMIT_DC_SHIFT;
+ }
+
+ if (smb->pdata->usb_hc_current_limit) {
+ val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+ smb->pdata->usb_hc_current_limit);
+ if (val < 0)
+ return val;
+
+ ret &= ~CFG_CURRENT_LIMIT_USB_MASK;
+ ret |= val;
+ }
+
+ return smb347_write(smb, CFG_CURRENT_LIMIT, ret);
+}
+
+static int smb347_set_voltage_limits(struct smb347_charger *smb)
+{
+ int ret, val;
+
+ ret = smb347_read(smb, CFG_FLOAT_VOLTAGE);
+ if (ret < 0)
+ return ret;
+
+ if (smb->pdata->pre_to_fast_voltage) {
+ val = smb->pdata->pre_to_fast_voltage;
+
+ /* uV */
+ val = clamp_val(val, 2400000, 3000000) - 2400000;
+ val /= 200000;
+
+ ret &= ~CFG_FLOAT_VOLTAGE_THRESHOLD_MASK;
+ ret |= val << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT;
+ }
+
+ if (smb->pdata->max_charge_voltage) {
+ val = smb->pdata->max_charge_voltage;
+
+ /* uV */
+ val = clamp_val(val, 3500000, 4500000) - 3500000;
+ val /= 20000;
+
+ ret |= val;
+ }
+
+ return smb347_write(smb, CFG_FLOAT_VOLTAGE, ret);
+}
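
A worked example of the float-voltage encoding above, assuming a typical 4.2 V maximum charge voltage:

	/*
	 *	max_charge_voltage = 4200000 uV
	 *	val = clamp_val(4200000, 3500000, 4500000) - 3500000 = 700000
	 *	val /= 20000	->	35 (0x23), OR'd into CFG_FLOAT_VOLTAGE
	 * The VOLTAGE_NOW readback in smb347_battery_get_property() reverses
	 * this: 3500000 + 35 * 20000 = 4200000 uV.
	 */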
+
+static int smb347_set_temp_limits(struct smb347_charger *smb)
+{
+ bool enable_therm_monitor = false;
+ int ret, val;
+
+ if (smb->pdata->chip_temp_threshold) {
+ val = smb->pdata->chip_temp_threshold;
+
+ /* degree C */
+ val = clamp_val(val, 100, 130) - 100;
+ val /= 10;
+
+ ret = smb347_read(smb, CFG_OTG);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~CFG_OTG_TEMP_THRESHOLD_MASK;
+ ret |= val << CFG_OTG_TEMP_THRESHOLD_SHIFT;
+
+ ret = smb347_write(smb, CFG_OTG, ret);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = smb347_read(smb, CFG_TEMP_LIMIT);
+ if (ret < 0)
+ return ret;
+
+ if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
+ val = smb->pdata->soft_cold_temp_limit;
+
+ val = clamp_val(val, 0, 15);
+ val /= 5;
+ /* this goes from higher to lower so invert the value */
+ val = ~val & 0x3;
+
+ ret &= ~CFG_TEMP_LIMIT_SOFT_COLD_MASK;
+ ret |= val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT;
+
+ enable_therm_monitor = true;
+ }
+
+ if (smb->pdata->soft_hot_temp_limit != SMB347_TEMP_USE_DEFAULT) {
+ val = smb->pdata->soft_hot_temp_limit;
+
+ val = clamp_val(val, 40, 55) - 40;
+ val /= 5;
+
+ ret &= ~CFG_TEMP_LIMIT_SOFT_HOT_MASK;
+ ret |= val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT;
+
+ enable_therm_monitor = true;
+ }
+
+ if (smb->pdata->hard_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
+ val = smb->pdata->hard_cold_temp_limit;
+
+ val = clamp_val(val, -5, 10) + 5;
+ val /= 5;
+ /* this goes from higher to lower so invert the value */
+ val = ~val & 0x3;
+
+ ret &= ~CFG_TEMP_LIMIT_HARD_COLD_MASK;
+ ret |= val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT;
+
+ enable_therm_monitor = true;
+ }
+
+ if (smb->pdata->hard_hot_temp_limit != SMB347_TEMP_USE_DEFAULT) {
+ val = smb->pdata->hard_hot_temp_limit;
+
+ val = clamp_val(val, 50, 65) - 50;
+ val /= 5;
+
+ ret &= ~CFG_TEMP_LIMIT_HARD_HOT_MASK;
+ ret |= val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT;
+
+ enable_therm_monitor = true;
+ }
+
+ ret = smb347_write(smb, CFG_TEMP_LIMIT, ret);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * If any of the temperature limits are set, we also enable the
+ * thermistor monitoring.
+ *
+ * When soft limits are hit, the device will start to compensate
+ * current and/or voltage depending on the configuration.
+ *
+ * When hard limit is hit, the device will suspend charging
+ * depending on the configuration.
+ */
+ if (enable_therm_monitor) {
+ ret = smb347_read(smb, CFG_THERM);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~CFG_THERM_MONITOR_DISABLED;
+
+ ret = smb347_write(smb, CFG_THERM, ret);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (smb->pdata->suspend_on_hard_temp_limit) {
+ ret = smb347_read(smb, CFG_SYSOK);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED;
+
+ ret = smb347_write(smb, CFG_SYSOK, ret);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (smb->pdata->soft_temp_limit_compensation !=
+ SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) {
+ val = smb->pdata->soft_temp_limit_compensation & 0x3;
+
+ ret = smb347_read(smb, CFG_THERM);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~CFG_THERM_SOFT_HOT_COMPENSATION_MASK;
+ ret |= val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT;
+
+ ret &= ~CFG_THERM_SOFT_COLD_COMPENSATION_MASK;
+ ret |= val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT;
+
+ ret = smb347_write(smb, CFG_THERM, ret);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (smb->pdata->charge_current_compensation) {
+ val = current_to_hw(ccc_tbl, ARRAY_SIZE(ccc_tbl),
+ smb->pdata->charge_current_compensation);
+ if (val < 0)
+ return val;
+
+ ret = smb347_read(smb, CFG_OTG);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~CFG_OTG_CC_COMPENSATION_MASK;
+ ret |= (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT;
+
+ ret = smb347_write(smb, CFG_OTG, ret);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * smb347_set_writable - enables/disables writing to non-volatile registers
+ * @smb: pointer to smb347 charger instance
+ *
+ * You can enable/disable writing to the non-volatile configuration
+ * registers by calling this function.
+ *
+ * Returns %0 on success and negative errno in case of failure.
+ */
+static int smb347_set_writable(struct smb347_charger *smb, bool writable)
+{
+ int ret;
+
+ ret = smb347_read(smb, CMD_A);
+ if (ret < 0)
+ return ret;
+
+ if (writable)
+ ret |= CMD_A_ALLOW_WRITE;
+ else
+ ret &= ~CMD_A_ALLOW_WRITE;
+
+ return smb347_write(smb, CMD_A, ret);
+}
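
The configuration helpers above all bracket their register updates with this unlock/relock call; a minimal sketch of the pattern (error handling trimmed, the middle step is just a placeholder):

	ret = smb347_set_writable(smb, true);	/* set CMD_A_ALLOW_WRITE */
	if (ret < 0)
		return ret;

	/* ... read-modify-write whichever CFG_* registers are needed ... */

	smb347_set_writable(smb, false);	/* clear CMD_A_ALLOW_WRITE again */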
+
+static int smb347_hw_init(struct smb347_charger *smb)
+{
+ int ret;
+
+ ret = smb347_set_writable(smb, true);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Program the platform specific configuration values to the device
+ * first.
+ */
+ ret = smb347_set_charge_current(smb);
+ if (ret < 0)
+ goto fail;
+
+ ret = smb347_set_current_limits(smb);
+ if (ret < 0)
+ goto fail;
+
+ ret = smb347_set_voltage_limits(smb);
+ if (ret < 0)
+ goto fail;
+
+ ret = smb347_set_temp_limits(smb);
+ if (ret < 0)
+ goto fail;
+
+ /* If USB charging is disabled we put the USB in suspend mode */
+ if (!smb->pdata->use_usb) {
+ ret = smb347_read(smb, CMD_A);
+ if (ret < 0)
+ goto fail;
+
+ ret |= CMD_A_SUSPEND_ENABLED;
+
+ ret = smb347_write(smb, CMD_A, ret);
+ if (ret < 0)
+ goto fail;
+ }
+
+ ret = smb347_read(smb, CFG_OTHER);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * If configured by platform data, we enable hardware Auto-OTG
+ * support for driving VBUS. Otherwise we disable it.
+ */
+ ret &= ~CFG_OTHER_RID_MASK;
+ if (smb->pdata->use_usb_otg)
+ ret |= CFG_OTHER_RID_ENABLED_AUTO_OTG;
+
+ ret = smb347_write(smb, CFG_OTHER, ret);
+ if (ret < 0)
+ goto fail;
+
+ ret = smb347_read(smb, CFG_PIN);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * Make the charging functionality controllable by a write to the
+ * command register unless pin control is specified in the platform
+ * data.
+ */
+ ret &= ~CFG_PIN_EN_CTRL_MASK;
+
+ switch (smb->pdata->enable_control) {
+ case SMB347_CHG_ENABLE_SW:
+ /* Do nothing, 0 means i2c control */
+ break;
+ case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW:
+ ret |= CFG_PIN_EN_CTRL_ACTIVE_LOW;
+ break;
+ case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH:
+ ret |= CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+ break;
+ }
+
+ /* Disable Automatic Power Source Detection (APSD) interrupt. */
+ ret &= ~CFG_PIN_EN_APSD_IRQ;
+
+ ret = smb347_write(smb, CFG_PIN, ret);
+ if (ret < 0)
+ goto fail;
+
+ ret = smb347_update_status(smb);
+ if (ret < 0)
+ goto fail;
+
+ ret = smb347_update_online(smb);
+
+fail:
+ smb347_set_writable(smb, false);
+ return ret;
+}
+
+static irqreturn_t smb347_interrupt(int irq, void *data)
+{
+ struct smb347_charger *smb = data;
+ int stat_c, irqstat_e, irqstat_c;
+ irqreturn_t ret = IRQ_NONE;
+
+ stat_c = smb347_read(smb, STAT_C);
+ if (stat_c < 0) {
+ dev_warn(&smb->client->dev, "reading STAT_C failed\n");
+ return IRQ_NONE;
+ }
+
+ irqstat_c = smb347_read(smb, IRQSTAT_C);
+ if (irqstat_c < 0) {
+ dev_warn(&smb->client->dev, "reading IRQSTAT_C failed\n");
+ return IRQ_NONE;
+ }
+
+ irqstat_e = smb347_read(smb, IRQSTAT_E);
+ if (irqstat_e < 0) {
+ dev_warn(&smb->client->dev, "reading IRQSTAT_E failed\n");
+ return IRQ_NONE;
+ }
+
+ /*
+	 * If we get a charger error, report it back to user space and
+	 * disable charging.
+ */
+ if (stat_c & STAT_C_CHARGER_ERROR) {
+ dev_err(&smb->client->dev,
+ "error in charger, disabling charging\n");
+
+ smb347_charging_disable(smb);
+ power_supply_changed(&smb->battery);
+
+ ret = IRQ_HANDLED;
+ }
+
+ /*
+ * If we reached the termination current the battery is charged and
+ * we can update the status now. Charging is automatically
+ * disabled by the hardware.
+ */
+ if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) {
+ if (irqstat_c & IRQSTAT_C_TERMINATION_STAT)
+ power_supply_changed(&smb->battery);
+ ret = IRQ_HANDLED;
+ }
+
+ /*
+ * If we got an under voltage interrupt it means that AC/USB input
+ * was connected or disconnected.
+ */
+ if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) {
+ if (smb347_update_status(smb) > 0) {
+ smb347_update_online(smb);
+ power_supply_changed(&smb->mains);
+ power_supply_changed(&smb->usb);
+ }
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int smb347_irq_set(struct smb347_charger *smb, bool enable)
+{
+ int ret;
+
+ ret = smb347_set_writable(smb, true);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Enable/disable interrupts for:
+ * - under voltage
+ * - termination current reached
+ * - charger error
+ */
+ if (enable) {
+ ret = smb347_write(smb, CFG_FAULT_IRQ, CFG_FAULT_IRQ_DCIN_UV);
+ if (ret < 0)
+ goto fail;
+
+ ret = smb347_write(smb, CFG_STATUS_IRQ,
+ CFG_STATUS_IRQ_TERMINATION_OR_TAPER);
+ if (ret < 0)
+ goto fail;
+
+ ret = smb347_read(smb, CFG_PIN);
+ if (ret < 0)
+ goto fail;
+
+ ret |= CFG_PIN_EN_CHARGER_ERROR;
+
+ ret = smb347_write(smb, CFG_PIN, ret);
+ } else {
+ ret = smb347_write(smb, CFG_FAULT_IRQ, 0);
+ if (ret < 0)
+ goto fail;
+
+ ret = smb347_write(smb, CFG_STATUS_IRQ, 0);
+ if (ret < 0)
+ goto fail;
+
+ ret = smb347_read(smb, CFG_PIN);
+ if (ret < 0)
+ goto fail;
+
+ ret &= ~CFG_PIN_EN_CHARGER_ERROR;
+
+ ret = smb347_write(smb, CFG_PIN, ret);
+ }
+
+fail:
+ smb347_set_writable(smb, false);
+ return ret;
+}
+
+static inline int smb347_irq_enable(struct smb347_charger *smb)
+{
+ return smb347_irq_set(smb, true);
+}
+
+static inline int smb347_irq_disable(struct smb347_charger *smb)
+{
+ return smb347_irq_set(smb, false);
+}
+
+static int smb347_irq_init(struct smb347_charger *smb)
+{
+ const struct smb347_charger_platform_data *pdata = smb->pdata;
+ int ret, irq = gpio_to_irq(pdata->irq_gpio);
+
+ ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, smb->client->name);
+ if (ret < 0)
+ goto fail;
+
+ ret = request_threaded_irq(irq, NULL, smb347_interrupt,
+ IRQF_TRIGGER_FALLING, smb->client->name,
+ smb);
+ if (ret < 0)
+ goto fail_gpio;
+
+ ret = smb347_set_writable(smb, true);
+ if (ret < 0)
+ goto fail_irq;
+
+ /*
+ * Configure the STAT output to be suitable for interrupts: disable
+ * all other output (except interrupts) and make it active low.
+ */
+ ret = smb347_read(smb, CFG_STAT);
+ if (ret < 0)
+ goto fail_readonly;
+
+ ret &= ~CFG_STAT_ACTIVE_HIGH;
+ ret |= CFG_STAT_DISABLED;
+
+ ret = smb347_write(smb, CFG_STAT, ret);
+ if (ret < 0)
+ goto fail_readonly;
+
+ ret = smb347_irq_enable(smb);
+ if (ret < 0)
+ goto fail_readonly;
+
+ smb347_set_writable(smb, false);
+ smb->client->irq = irq;
+ return 0;
+
+fail_readonly:
+ smb347_set_writable(smb, false);
+fail_irq:
+ free_irq(irq, smb);
+fail_gpio:
+ gpio_free(pdata->irq_gpio);
+fail:
+ smb->client->irq = 0;
+ return ret;
+}
+
+static int smb347_mains_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smb347_charger *smb =
+ container_of(psy, struct smb347_charger, mains);
+
+ if (prop == POWER_SUPPLY_PROP_ONLINE) {
+ val->intval = smb->mains_online;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static enum power_supply_property smb347_mains_properties[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int smb347_usb_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smb347_charger *smb =
+ container_of(psy, struct smb347_charger, usb);
+
+ if (prop == POWER_SUPPLY_PROP_ONLINE) {
+ val->intval = smb->usb_online;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static enum power_supply_property smb347_usb_properties[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int smb347_battery_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smb347_charger *smb =
+ container_of(psy, struct smb347_charger, battery);
+ const struct smb347_charger_platform_data *pdata = smb->pdata;
+ int ret;
+
+ ret = smb347_update_status(smb);
+ if (ret < 0)
+ return ret;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (!smb347_is_online(smb)) {
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ }
+ if (smb347_charging_status(smb))
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ if (!smb347_is_online(smb))
+ return -ENODATA;
+
+ /*
+ * We handle trickle and pre-charging the same, and taper
+ * and none the same.
+ */
+ switch (smb347_charging_status(smb)) {
+ case 1:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case 2:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = pdata->battery_info.technology;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = pdata->battery_info.voltage_min_design;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = pdata->battery_info.voltage_max_design;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (!smb347_is_online(smb))
+ return -ENODATA;
+ ret = smb347_read(smb, STAT_A);
+ if (ret < 0)
+ return ret;
+
+ ret &= STAT_A_FLOAT_VOLTAGE_MASK;
+ if (ret > 0x3d)
+ ret = 0x3d;
+
+ val->intval = 3500000 + ret * 20000;
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ if (!smb347_is_online(smb))
+ return -ENODATA;
+
+ ret = smb347_read(smb, STAT_B);
+ if (ret < 0)
+ return ret;
+
+ /*
+		 * The reported current comes from either the FCC or the PCC
+		 * table; bit 5 of STAT_B tells which table to use.
+ */
+ if (ret & 0x20) {
+ val->intval = hw_to_current(fcc_tbl,
+ ARRAY_SIZE(fcc_tbl),
+ ret & 7);
+ } else {
+ ret >>= 3;
+ val->intval = hw_to_current(pcc_tbl,
+ ARRAY_SIZE(pcc_tbl),
+ ret & 7);
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = pdata->battery_info.charge_full_design;
+ break;
+
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = pdata->battery_info.name;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property smb347_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+};
+
+static int smb347_debugfs_show(struct seq_file *s, void *data)
+{
+ struct smb347_charger *smb = s->private;
+ int ret;
+ u8 reg;
+
+ seq_printf(s, "Control registers:\n");
+ seq_printf(s, "==================\n");
+ for (reg = CFG_CHARGE_CURRENT; reg <= CFG_ADDRESS; reg++) {
+ ret = smb347_read(smb, reg);
+ seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
+ }
+ seq_printf(s, "\n");
+
+ seq_printf(s, "Command registers:\n");
+ seq_printf(s, "==================\n");
+ ret = smb347_read(smb, CMD_A);
+ seq_printf(s, "0x%02x:\t0x%02x\n", CMD_A, ret);
+ ret = smb347_read(smb, CMD_B);
+ seq_printf(s, "0x%02x:\t0x%02x\n", CMD_B, ret);
+ ret = smb347_read(smb, CMD_C);
+ seq_printf(s, "0x%02x:\t0x%02x\n", CMD_C, ret);
+ seq_printf(s, "\n");
+
+ seq_printf(s, "Interrupt status registers:\n");
+ seq_printf(s, "===========================\n");
+ for (reg = IRQSTAT_A; reg <= IRQSTAT_F; reg++) {
+ ret = smb347_read(smb, reg);
+ seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
+ }
+ seq_printf(s, "\n");
+
+ seq_printf(s, "Status registers:\n");
+ seq_printf(s, "=================\n");
+ for (reg = STAT_A; reg <= STAT_E; reg++) {
+ ret = smb347_read(smb, reg);
+ seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
+ }
+
+ return 0;
+}
+
+static int smb347_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smb347_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations smb347_debugfs_fops = {
+ .open = smb347_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int smb347_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ static char *battery[] = { "smb347-battery" };
+ const struct smb347_charger_platform_data *pdata;
+ struct device *dev = &client->dev;
+ struct smb347_charger *smb;
+ int ret;
+
+ pdata = dev->platform_data;
+ if (!pdata)
+ return -EINVAL;
+
+ if (!pdata->use_mains && !pdata->use_usb)
+ return -EINVAL;
+
+ smb = devm_kzalloc(dev, sizeof(*smb), GFP_KERNEL);
+ if (!smb)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, smb);
+
+ mutex_init(&smb->lock);
+ smb->client = client;
+ smb->pdata = pdata;
+
+ ret = smb347_hw_init(smb);
+ if (ret < 0)
+ return ret;
+
+ smb->mains.name = "smb347-mains";
+ smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
+ smb->mains.get_property = smb347_mains_get_property;
+ smb->mains.properties = smb347_mains_properties;
+ smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
+ smb->mains.supplied_to = battery;
+ smb->mains.num_supplicants = ARRAY_SIZE(battery);
+
+ smb->usb.name = "smb347-usb";
+ smb->usb.type = POWER_SUPPLY_TYPE_USB;
+ smb->usb.get_property = smb347_usb_get_property;
+ smb->usb.properties = smb347_usb_properties;
+ smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
+ smb->usb.supplied_to = battery;
+ smb->usb.num_supplicants = ARRAY_SIZE(battery);
+
+ smb->battery.name = "smb347-battery";
+ smb->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ smb->battery.get_property = smb347_battery_get_property;
+ smb->battery.properties = smb347_battery_properties;
+ smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties);
+
+ ret = power_supply_register(dev, &smb->mains);
+ if (ret < 0)
+ return ret;
+
+ ret = power_supply_register(dev, &smb->usb);
+ if (ret < 0) {
+ power_supply_unregister(&smb->mains);
+ return ret;
+ }
+
+ ret = power_supply_register(dev, &smb->battery);
+ if (ret < 0) {
+ power_supply_unregister(&smb->usb);
+ power_supply_unregister(&smb->mains);
+ return ret;
+ }
+
+ /*
+ * Interrupt pin is optional. If it is connected, we setup the
+ * interrupt support here.
+ */
+ if (pdata->irq_gpio >= 0) {
+ ret = smb347_irq_init(smb);
+ if (ret < 0) {
+ dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
+ dev_warn(dev, "disabling IRQ support\n");
+ }
+ }
+
+ smb->dentry = debugfs_create_file("smb347-regs", S_IRUSR, NULL, smb,
+ &smb347_debugfs_fops);
+ return 0;
+}
+
+static int smb347_remove(struct i2c_client *client)
+{
+ struct smb347_charger *smb = i2c_get_clientdata(client);
+
+ if (!IS_ERR_OR_NULL(smb->dentry))
+ debugfs_remove(smb->dentry);
+
+ if (client->irq) {
+ smb347_irq_disable(smb);
+ free_irq(client->irq, smb);
+ gpio_free(smb->pdata->irq_gpio);
+ }
+
+ power_supply_unregister(&smb->battery);
+ power_supply_unregister(&smb->usb);
+ power_supply_unregister(&smb->mains);
+ return 0;
+}
+
+static const struct i2c_device_id smb347_id[] = {
+ { "smb347", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, smb347_id);
+
+static struct i2c_driver smb347_driver = {
+ .driver = {
+ .name = "smb347",
+ },
+ .probe = smb347_probe,
+ .remove = __devexit_p(smb347_remove),
+ .id_table = smb347_id,
+};
+
+static int __init smb347_init(void)
+{
+ return i2c_add_driver(&smb347_driver);
+}
+module_init(smb347_init);
+
+static void __exit smb347_exit(void)
+{
+ i2c_del_driver(&smb347_driver);
+}
+module_exit(smb347_exit);
+
+MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("SMB347 battery charger driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("i2c:smb347");
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index 636ebb2a0e80..8c9a607ea77a 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -316,19 +316,7 @@ static struct i2c_driver z2_batt_driver = {
.remove = __devexit_p(z2_batt_remove),
.id_table = z2_batt_id,
};
-
-static int __init z2_batt_init(void)
-{
- return i2c_add_driver(&z2_batt_driver);
-}
-
-static void __exit z2_batt_exit(void)
-{
- i2c_del_driver(&z2_batt_driver);
-}
-
-module_init(z2_batt_init);
-module_exit(z2_batt_exit);
+module_i2c_driver(z2_batt_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Edwards <sweetlilmre@gmail.com>");
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index dc87eda65814..eb415bd76494 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -458,6 +458,11 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
if (rtc->uie_rtctimer.enabled == enabled)
goto out;
+ if (rtc->uie_unsupported) {
+ err = -EINVAL;
+ goto out;
+ }
+
if (enabled) {
struct rtc_time tm;
ktime_t now, onesec;
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index e954a759ba85..42f5f829b3ee 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -360,6 +360,8 @@ static int __devinit mpc5121_rtc_probe(struct platform_device *op)
&mpc5200_rtc_ops, THIS_MODULE);
}
+ rtc->rtc->uie_unsupported = 1;
+
if (IS_ERR(rtc->rtc)) {
err = PTR_ERR(rtc->rtc);
goto out_free_irq;
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 4940fa8c4e10..50a5c4adee48 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -33,6 +33,7 @@
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/bitops.h>
+#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 7d48700257a7..9328121804bb 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -341,10 +341,10 @@ MODULE_PARM_DESC(aic79xx,
" (0/256ms,1/128ms,2/64ms,3/32ms)\n"
" slowcrc Turn on the SLOWCRC bit (Rev B only)\n"
"\n"
-" Sample /etc/modprobe.conf line:\n"
-" Enable verbose logging\n"
-" Set tag depth on Controller 2/Target 2 to 10 tags\n"
-" Shorten the selection timeout to 128ms\n"
+" Sample modprobe configuration file:\n"
+" # Enable verbose logging\n"
+" # Set tag depth on Controller 2/Target 2 to 10 tags\n"
+" # Shorten the selection timeout to 128ms\n"
"\n"
" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c6251bb4f438..5a477cdc780d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -360,10 +360,10 @@ MODULE_PARM_DESC(aic7xxx,
" seltime:<int> Selection Timeout\n"
" (0/256ms,1/128ms,2/64ms,3/32ms)\n"
"\n"
-" Sample /etc/modprobe.conf line:\n"
-" Toggle EISA/VLB probing\n"
-" Set tag depth on Controller 1/Target 1 to 10 tags\n"
-" Shorten the selection timeout to 128ms\n"
+" Sample modprobe configuration file:\n"
+" # Toggle EISA/VLB probing\n"
+" # Set tag depth on Controller 1/Target 1 to 10 tags\n"
+" # Shorten the selection timeout to 128ms\n"
"\n"
" options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n"
);
diff --git a/drivers/sh/intc/balancing.c b/drivers/sh/intc/balancing.c
index cec7a96f2c09..bc780807ccb0 100644
--- a/drivers/sh/intc/balancing.c
+++ b/drivers/sh/intc/balancing.c
@@ -9,7 +9,7 @@
*/
#include "internals.h"
-static unsigned long dist_handle[NR_IRQS];
+static unsigned long dist_handle[INTC_NR_IRQS];
void intc_balancing_enable(unsigned int irq)
{
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index 7b246efa94ea..012df2676a26 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -2,13 +2,14 @@
* IRQ chip definitions for INTC IRQs.
*
* Copyright (C) 2007, 2008 Magnus Damm
- * Copyright (C) 2009, 2010 Paul Mundt
+ * Copyright (C) 2009 - 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/cpumask.h>
+#include <linux/bsearch.h>
#include <linux/io.h>
#include "internals.h"
@@ -58,11 +59,6 @@ static void intc_disable(struct irq_data *data)
}
}
-static int intc_set_wake(struct irq_data *data, unsigned int on)
-{
- return 0; /* allow wakeup, but setup hardware in intc_suspend() */
-}
-
#ifdef CONFIG_SMP
/*
* This is held with the irq desc lock held, so we don't require any
@@ -78,7 +74,7 @@ static int intc_set_affinity(struct irq_data *data,
cpumask_copy(data->affinity, cpumask);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
@@ -122,28 +118,12 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
unsigned int nr_hp,
unsigned int irq)
{
- int i;
-
- /*
- * this doesn't scale well, but...
- *
- * this function should only be used for cerain uncommon
- * operations such as intc_set_priority() and intc_set_type()
- * and in those rare cases performance doesn't matter that much.
- * keeping the memory footprint low is more important.
- *
- * one rather simple way to speed this up and still keep the
- * memory footprint down is to make sure the array is sorted
- * and then perform a bisect to lookup the irq.
- */
- for (i = 0; i < nr_hp; i++) {
- if ((hp + i)->irq != irq)
- continue;
+ struct intc_handle_int key;
- return hp + i;
- }
+ key.irq = irq;
+ key.handle = 0;
- return NULL;
+ return bsearch(&key, hp, nr_hp, sizeof(*hp), intc_handle_int_cmp);
}
int intc_set_priority(unsigned int irq, unsigned int prio)
@@ -223,10 +203,9 @@ struct irq_chip intc_irq_chip = {
.irq_mask_ack = intc_mask_ack,
.irq_enable = intc_enable,
.irq_disable = intc_disable,
- .irq_shutdown = intc_disable,
.irq_set_type = intc_set_type,
- .irq_set_wake = intc_set_wake,
#ifdef CONFIG_SMP
.irq_set_affinity = intc_set_affinity,
#endif
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index e53e449b4eca..7e562ccb6997 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -2,7 +2,7 @@
* Shared interrupt handling code for IPR and INTC2 types of IRQs.
*
* Copyright (C) 2007, 2008 Magnus Damm
- * Copyright (C) 2009, 2010 Paul Mundt
+ * Copyright (C) 2009 - 2012 Paul Mundt
*
* Based on intc2.c and ipr.c
*
@@ -31,18 +31,19 @@
#include <linux/spinlock.h>
#include <linux/radix-tree.h>
#include <linux/export.h>
+#include <linux/sort.h>
#include "internals.h"
LIST_HEAD(intc_list);
DEFINE_RAW_SPINLOCK(intc_big_lock);
-unsigned int nr_intc_controllers;
+static unsigned int nr_intc_controllers;
/*
* Default priority level
* - this needs to be at least 2 for 5-bit priorities on 7780
*/
static unsigned int default_prio_level = 2; /* 2 - 16 */
-static unsigned int intc_prio_level[NR_IRQS]; /* for now */
+static unsigned int intc_prio_level[INTC_NR_IRQS]; /* for now */
unsigned int intc_get_dfl_prio_level(void)
{
@@ -267,6 +268,9 @@ int __init register_intc_controller(struct intc_desc *desc)
k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
}
+
+ sort(d->prio, hw->nr_prio_regs, sizeof(*d->prio),
+ intc_handle_int_cmp, NULL);
}
if (hw->sense_regs) {
@@ -277,6 +281,9 @@ int __init register_intc_controller(struct intc_desc *desc)
for (i = 0; i < hw->nr_sense_regs; i++)
k += save_reg(d, k, hw->sense_regs[i].reg, 0);
+
+ sort(d->sense, hw->nr_sense_regs, sizeof(*d->sense),
+ intc_handle_int_cmp, NULL);
}
if (hw->subgroups)
diff --git a/drivers/sh/intc/handle.c b/drivers/sh/intc/handle.c
index 057ce56829bf..7863a44918a2 100644
--- a/drivers/sh/intc/handle.c
+++ b/drivers/sh/intc/handle.c
@@ -13,7 +13,7 @@
#include <linux/spinlock.h>
#include "internals.h"
-static unsigned long ack_handle[NR_IRQS];
+static unsigned long ack_handle[INTC_NR_IRQS];
static intc_enum __init intc_grp_id(struct intc_desc *desc,
intc_enum enum_id)
@@ -172,9 +172,8 @@ intc_get_prio_handle(struct intc_desc *desc, struct intc_desc_int *d,
return 0;
}
-static unsigned int __init intc_ack_data(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id)
+static unsigned int intc_ack_data(struct intc_desc *desc,
+ struct intc_desc_int *d, intc_enum enum_id)
{
struct intc_mask_reg *mr = desc->hw.ack_regs;
unsigned int i, j, fn, mode;
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index b0e9155ff739..f034a979a16f 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -108,6 +108,14 @@ static inline void activate_irq(int irq)
#endif
}
+static inline int intc_handle_int_cmp(const void *a, const void *b)
+{
+ const struct intc_handle_int *_a = a;
+ const struct intc_handle_int *_b = b;
+
+ return _a->irq - _b->irq;
+}
+
/* access.c */
extern unsigned long
(*intc_reg_fns[])(unsigned long addr, unsigned long h, unsigned long data);
@@ -157,7 +165,6 @@ void _intc_enable(struct irq_data *data, unsigned long handle);
/* core.c */
extern struct list_head intc_list;
extern raw_spinlock_t intc_big_lock;
-extern unsigned int nr_intc_controllers;
extern struct bus_type intc_subsys;
unsigned int intc_get_dfl_prio_level(void);
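
The intc_handle_int_cmp() helper added above ties the preceding hunks together: register_intc_controller() now sort()s the prio and sense tables once at registration time, and intc_find_irq() replaces its linear scan with bsearch(), exactly the bisect the removed comment suggested. A minimal userspace sketch of the same sorted-array-plus-bisect pattern, using the standard C library's qsort()/bsearch() in place of the kernel's sort()/bsearch() (struct layout and values are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct handle { unsigned int irq; unsigned long handle; };

/* same shape as intc_handle_int_cmp(): order entries by irq number */
static int handle_cmp(const void *a, const void *b)
{
        const struct handle *x = a, *y = b;

        return (int)x->irq - (int)y->irq;
}

int main(void)
{
        struct handle tbl[] = { { 36, 0 }, { 17, 0 }, { 52, 0 }, { 20, 0 } };
        size_t n = sizeof(tbl) / sizeof(tbl[0]);
        struct handle key = { .irq = 52 }, *hit;

        qsort(tbl, n, sizeof(tbl[0]), handle_cmp);              /* sort once */
        hit = bsearch(&key, tbl, n, sizeof(tbl[0]), handle_cmp); /* O(log n) lookup */
        printf("irq %u %s\n", key.irq, hit ? "found" : "not found");
        return 0;
}
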
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index c7ec49ffd9f6..93cec21e788b 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -17,7 +17,7 @@
#include <linux/export.h>
#include "internals.h"
-static struct intc_map_entry intc_irq_xlate[NR_IRQS];
+static struct intc_map_entry intc_irq_xlate[INTC_NR_IRQS];
struct intc_virq_list {
unsigned int irq;
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 8418eb036651..b9f0192758d6 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
+#include <linux/types.h>
#include "spi-dw.h"
@@ -136,6 +137,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
txconf.dst_maxburst = LNW_DMA_MSIZE_16;
txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ txconf.device_fc = false;
txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
(unsigned long) &txconf);
@@ -144,7 +146,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
dws->tx_sgl.dma_address = dws->tx_dma;
dws->tx_sgl.length = dws->len;
- txdesc = txchan->device->device_prep_slave_sg(txchan,
+ txdesc = dmaengine_prep_slave_sg(txchan,
&dws->tx_sgl,
1,
DMA_MEM_TO_DEV,
@@ -158,6 +160,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
rxconf.src_maxburst = LNW_DMA_MSIZE_16;
rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ rxconf.device_fc = false;
rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
(unsigned long) &rxconf);
@@ -166,7 +169,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
dws->rx_sgl.dma_address = dws->rx_dma;
dws->rx_sgl.length = dws->len;
- rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
&dws->rx_sgl,
1,
DMA_DEV_TO_MEM,
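
This hunk, and the spi, serial, usb and video hunks that follow, all make the same two-part conversion: open-coded chan->device->device_prep_slave_sg(...) calls become the dmaengine_prep_slave_sg() helper, and the newly added device_fc field of struct dma_slave_config is initialized explicitly to false. The shape of the conversion, shown here as a hedged before/after sketch with placeholder variables rather than any one driver's real code:

        /* before: reach through the channel's device ops directly */
        desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
                                                  DMA_MEM_TO_DEV, flags);

        /* after: let the dmaengine helper hide the ops indirection */
        desc = dmaengine_prep_slave_sg(chan, sgl, nents,
                                       DMA_MEM_TO_DEV, flags);

Keeping drivers on the helper means the dmaengine core can adjust the underlying callback without touching every caller again.
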
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index d46e55c720b7..6db2887852d6 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -633,8 +633,8 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
if (!nents)
return ERR_PTR(-ENOMEM);
- txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
- slave_dirn, DMA_CTRL_ACK);
+ txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents,
+ slave_dirn, DMA_CTRL_ACK);
if (!txd) {
dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index dc8485d1e883..96f0da66b185 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -880,10 +880,12 @@ static int configure_dma(struct pl022 *pl022)
struct dma_slave_config rx_conf = {
.src_addr = SSP_DR(pl022->phybase),
.direction = DMA_DEV_TO_MEM,
+ .device_fc = false,
};
struct dma_slave_config tx_conf = {
.dst_addr = SSP_DR(pl022->phybase),
.direction = DMA_MEM_TO_DEV,
+ .device_fc = false,
};
unsigned int pages;
int ret;
@@ -1017,7 +1019,7 @@ static int configure_dma(struct pl022 *pl022)
goto err_tx_sgmap;
/* Send both scatterlists */
- rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
pl022->sgt_rx.sgl,
rx_sglen,
DMA_DEV_TO_MEM,
@@ -1025,7 +1027,7 @@ static int configure_dma(struct pl022 *pl022)
if (!rxdesc)
goto err_rxdesc;
- txdesc = txchan->device->device_prep_slave_sg(txchan,
+ txdesc = dmaengine_prep_slave_sg(txchan,
pl022->sgt_tx.sgl,
tx_sglen,
DMA_MEM_TO_DEV,
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 5c6fa5ed3366..ec47d3bdfd13 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1099,7 +1099,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
}
sg = dma->sg_rx_p;
- desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
+ desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
num, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
@@ -1158,7 +1158,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
}
sg = dma->sg_tx_p;
- desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
+ desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
sg, num, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 59e095362c81..c2832124bb3e 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -381,8 +381,7 @@ int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
repeat:
fdt = files_fdtable(files);
- fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
- files->next_fd);
+ fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, files->next_fd);
/*
* N.B. For clone tasks sharing a files structure, this test
@@ -410,11 +409,11 @@ repeat:
goto repeat;
}
- FD_SET(fd, fdt->open_fds);
+ __set_open_fd(fd, fdt);
if (flags & O_CLOEXEC)
- FD_SET(fd, fdt->close_on_exec);
+ __set_close_on_exec(fd, fdt);
else
- FD_CLR(fd, fdt->close_on_exec);
+ __clear_close_on_exec(fd, fdt);
files->next_fd = fd + 1;
#if 1
/* Sanity check */
@@ -455,7 +454,7 @@ static void task_fd_install(
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = files_fdtable(files);
- __FD_CLR(fd, fdt->open_fds);
+ __clear_open_fd(fd, fdt);
if (fd < files->next_fd)
files->next_fd = fd;
}
@@ -481,7 +480,7 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
if (!filp)
goto out_unlock;
rcu_assign_pointer(fdt->fd[fd], NULL);
- FD_CLR(fd, fdt->close_on_exec);
+ __clear_close_on_exec(fd, fdt);
__put_unused_fd(files, fd);
spin_unlock(&files->file_lock);
retval = filp_close(filp, files);
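
The binder hunks above (and the autofs4 one further down) convert the old fd_set-style macros -- FD_SET(), FD_CLR(), __FD_CLR() on fdt->open_fds and fdt->close_on_exec -- to the new fdtable helpers __set_open_fd(), __set_close_on_exec(), __clear_close_on_exec() and __clear_open_fd(), which treat those members as plain unsigned-long bitmaps. A self-contained userspace sketch of the bit-per-descriptor bookkeeping those helpers implement (function and variable names here are invented, not the kernel's):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)

static void bitmap_set_bit(unsigned long *map, unsigned int fd)
{
        map[fd / BITS_PER_LONG] |= 1UL << (fd % BITS_PER_LONG);
}

static void bitmap_clear_bit(unsigned long *map, unsigned int fd)
{
        map[fd / BITS_PER_LONG] &= ~(1UL << (fd % BITS_PER_LONG));
}

static int bitmap_test_bit(const unsigned long *map, unsigned int fd)
{
        return (map[fd / BITS_PER_LONG] >> (fd % BITS_PER_LONG)) & 1;
}

int main(void)
{
        unsigned long open_fds[4] = { 0 };      /* 4 * BITS_PER_LONG descriptors */

        bitmap_set_bit(open_fds, 70);           /* mark fd 70 as allocated */
        printf("fd 70 open? %d\n", bitmap_test_bit(open_fds, 70));
        bitmap_clear_bit(open_fds, 70);         /* release it again */
        printf("fd 70 open? %d\n", bitmap_test_bit(open_fds, 70));
        return 0;
}
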
diff --git a/drivers/staging/asus_oled/README b/drivers/staging/asus_oled/README
index 0d82a6d5fa58..2d721232467a 100644
--- a/drivers/staging/asus_oled/README
+++ b/drivers/staging/asus_oled/README
@@ -52,7 +52,7 @@ Configuration
There is only one option: start_off.
You can use it by: 'modprobe asus_oled start_off=1', or by adding this
- line to /etc/modprobe.conf:
+ line to /etc/modprobe.d/asus_oled.conf:
options asus_oled start_off=1
With this option provided, asus_oled driver will switch off the display
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f7f71b2d3101..514a691abea0 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -18,3 +18,11 @@ config THERMAL_HWMON
depends on THERMAL
depends on HWMON=y || HWMON=THERMAL
default y
+
+config SPEAR_THERMAL
+ bool "SPEAr thermal sensor driver"
+ depends on THERMAL
+ depends on PLAT_SPEAR
+ help
+ Enable this to plug the SPEAr thermal sensor driver into the Linux
+ thermal framework
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 31108a01c22e..a9fff0bf4b14 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_THERMAL) += thermal_sys.o
+obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
\ No newline at end of file
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
new file mode 100644
index 000000000000..c2e32df3b164
--- /dev/null
+++ b/drivers/thermal/spear_thermal.c
@@ -0,0 +1,206 @@
+/*
+ * SPEAr thermal driver.
+ *
+ * Copyright (C) 2011-2012 ST Microelectronics
+ * Author: Vincenzo Frascino <vincenzo.frascino@st.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/spear_thermal.h>
+#include <linux/thermal.h>
+
+#define MD_FACTOR 1000
+
+/* SPEAr Thermal Sensor Dev Structure */
+struct spear_thermal_dev {
+ /* pointer to base address of the thermal sensor */
+ void __iomem *thermal_base;
+ /* clk structure */
+ struct clk *clk;
+ /* pointer to thermal flags */
+ unsigned int flags;
+};
+
+static inline int thermal_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ struct spear_thermal_dev *stdev = thermal->devdata;
+
+ /*
+ * Data are ready to be read after 628 usec from POWERDOWN signal
+ * (PDN) = 1
+ */
+ *temp = (readl_relaxed(stdev->thermal_base) & 0x7F) * MD_FACTOR;
+ return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+ .get_temp = thermal_get_temp,
+};
+
+#ifdef CONFIG_PM
+static int spear_thermal_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
+ struct spear_thermal_dev *stdev = spear_thermal->devdata;
+ unsigned int actual_mask = 0;
+
+ /* Disable SPEAr Thermal Sensor */
+ actual_mask = readl_relaxed(stdev->thermal_base);
+ writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
+
+ clk_disable(stdev->clk);
+ dev_info(dev, "Suspended.\n");
+
+ return 0;
+}
+
+static int spear_thermal_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
+ struct spear_thermal_dev *stdev = spear_thermal->devdata;
+ unsigned int actual_mask = 0;
+ int ret = 0;
+
+ ret = clk_enable(stdev->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable clock\n");
+ return ret;
+ }
+
+ /* Enable SPEAr Thermal Sensor */
+ actual_mask = readl_relaxed(stdev->thermal_base);
+ writel_relaxed(actual_mask | stdev->flags, stdev->thermal_base);
+
+ dev_info(dev, "Resumed.\n");
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
+ spear_thermal_resume);
+
+static int spear_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *spear_thermal = NULL;
+ struct spear_thermal_dev *stdev;
+ struct spear_thermal_pdata *pdata;
+ int ret = 0;
+ struct resource *stres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!stres) {
+ dev_err(&pdev->dev, "memory resource missing\n");
+ return -ENODEV;
+ }
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data is NULL\n");
+ return -EINVAL;
+ }
+
+ stdev = devm_kzalloc(&pdev->dev, sizeof(*stdev), GFP_KERNEL);
+ if (!stdev) {
+ dev_err(&pdev->dev, "kzalloc fail\n");
+ return -ENOMEM;
+ }
+
+ /* Enable thermal sensor */
+ stdev->thermal_base = devm_ioremap(&pdev->dev, stres->start,
+ resource_size(stres));
+ if (!stdev->thermal_base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ stdev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(stdev->clk)) {
+ dev_err(&pdev->dev, "Can't get clock\n");
+ return PTR_ERR(stdev->clk);
+ }
+
+ ret = clk_enable(stdev->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable clock\n");
+ goto put_clk;
+ }
+
+ stdev->flags = pdata->thermal_flags;
+ writel_relaxed(stdev->flags, stdev->thermal_base);
+
+ spear_thermal = thermal_zone_device_register("spear_thermal", 0,
+ stdev, &ops, 0, 0, 0, 0);
+ if (IS_ERR(spear_thermal)) {
+ dev_err(&pdev->dev, "thermal zone device is NULL\n");
+ ret = PTR_ERR(spear_thermal);
+ goto disable_clk;
+ }
+
+ platform_set_drvdata(pdev, spear_thermal);
+
+ dev_info(&spear_thermal->device, "Thermal Sensor Loaded at: 0x%p.\n",
+ stdev->thermal_base);
+
+ return 0;
+
+disable_clk:
+ clk_disable(stdev->clk);
+put_clk:
+ clk_put(stdev->clk);
+
+ return ret;
+}
+
+static int spear_thermal_exit(struct platform_device *pdev)
+{
+ unsigned int actual_mask = 0;
+ struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
+ struct spear_thermal_dev *stdev = spear_thermal->devdata;
+
+ thermal_zone_device_unregister(spear_thermal);
+ platform_set_drvdata(pdev, NULL);
+
+ /* Disable SPEAr Thermal Sensor */
+ actual_mask = readl_relaxed(stdev->thermal_base);
+ writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
+
+ clk_disable(stdev->clk);
+ clk_put(stdev->clk);
+
+ return 0;
+}
+
+static struct platform_driver spear_thermal_driver = {
+ .probe = spear_thermal_probe,
+ .remove = spear_thermal_exit,
+ .driver = {
+ .name = "spear_thermal",
+ .owner = THIS_MODULE,
+ .pm = &spear_thermal_pm_ops,
+ },
+};
+
+module_platform_driver(spear_thermal_driver);
+
+MODULE_AUTHOR("Vincenzo Frascino <vincenzo.frascino@st.com>");
+MODULE_DESCRIPTION("SPEAr thermal driver");
+MODULE_LICENSE("GPL");
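
One detail worth a worked example: thermal_get_temp() above masks the low seven bits of the sensor register and multiplies by MD_FACTOR (1000), presumably the millidegree-Celsius scaling the thermal sysfs interface expects. A standalone sketch of that conversion with a hypothetical register value:

#include <stdio.h>

int main(void)
{
        unsigned long raw  = 0xA6;                      /* hypothetical register read */
        unsigned long temp = (raw & 0x7F) * 1000;       /* keep bits 0-6, scale */

        printf("%lu\n", temp);                          /* 0x26 = 38 -> prints 38000 */
        return 0;
}
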
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 220ce7e31cf5..022bacb71a7e 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -23,6 +23,8 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -39,8 +41,6 @@ MODULE_AUTHOR("Zhang Rui");
MODULE_DESCRIPTION("Generic thermal management sysfs support");
MODULE_LICENSE("GPL");
-#define PREFIX "Thermal: "
-
struct thermal_cooling_device_instance {
int id;
char name[THERMAL_NAME_LENGTH];
@@ -60,13 +60,11 @@ static LIST_HEAD(thermal_tz_list);
static LIST_HEAD(thermal_cdev_list);
static DEFINE_MUTEX(thermal_list_lock);
-static unsigned int thermal_event_seqnum;
-
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
int err;
- again:
+again:
if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
return -ENOMEM;
@@ -152,9 +150,9 @@ mode_store(struct device *dev, struct device_attribute *attr,
if (!tz->ops->set_mode)
return -EPERM;
- if (!strncmp(buf, "enabled", sizeof("enabled")))
+ if (!strncmp(buf, "enabled", sizeof("enabled") - 1))
result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED);
- else if (!strncmp(buf, "disabled", sizeof("disabled")))
+ else if (!strncmp(buf, "disabled", sizeof("disabled") - 1))
result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED);
else
result = -EINVAL;
@@ -283,8 +281,7 @@ passive_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(type, 0444, type_show, NULL);
static DEVICE_ATTR(temp, 0444, temp_show, NULL);
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
-static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, \
- passive_store);
+static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
static struct device_attribute trip_point_attrs[] = {
__ATTR(trip_point_0_type, 0444, trip_point_type_show, NULL),
@@ -313,22 +310,6 @@ static struct device_attribute trip_point_attrs[] = {
__ATTR(trip_point_11_temp, 0444, trip_point_temp_show, NULL),
};
-#define TRIP_POINT_ATTR_ADD(_dev, _index, result) \
-do { \
- result = device_create_file(_dev, \
- &trip_point_attrs[_index * 2]); \
- if (result) \
- break; \
- result = device_create_file(_dev, \
- &trip_point_attrs[_index * 2 + 1]); \
-} while (0)
-
-#define TRIP_POINT_ATTR_REMOVE(_dev, _index) \
-do { \
- device_remove_file(_dev, &trip_point_attrs[_index * 2]); \
- device_remove_file(_dev, &trip_point_attrs[_index * 2 + 1]); \
-} while (0)
-
/* sys I/F for cooling device */
#define to_cooling_device(_dev) \
container_of(_dev, struct thermal_cooling_device, device)
@@ -835,15 +816,14 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
return 0;
device_remove_file(&tz->device, &dev->attr);
- remove_symbol_link:
+remove_symbol_link:
sysfs_remove_link(&tz->device.kobj, dev->name);
- release_idr:
+release_idr:
release_idr(&tz->idr, &tz->lock, dev->id);
- free_mem:
+free_mem:
kfree(dev);
return result;
}
-
EXPORT_SYMBOL(thermal_zone_bind_cooling_device);
/**
@@ -873,14 +853,13 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
return -ENODEV;
- unbind:
+unbind:
device_remove_file(&tz->device, &pos->attr);
sysfs_remove_link(&tz->device.kobj, pos->name);
release_idr(&tz->idr, &tz->lock, pos->id);
kfree(pos);
return 0;
}
-
EXPORT_SYMBOL(thermal_zone_unbind_cooling_device);
static void thermal_release(struct device *dev)
@@ -888,7 +867,8 @@ static void thermal_release(struct device *dev)
struct thermal_zone_device *tz;
struct thermal_cooling_device *cdev;
- if (!strncmp(dev_name(dev), "thermal_zone", sizeof "thermal_zone" - 1)) {
+ if (!strncmp(dev_name(dev), "thermal_zone",
+ sizeof("thermal_zone") - 1)) {
tz = to_thermal_zone(dev);
kfree(tz);
} else {
@@ -908,8 +888,9 @@ static struct class thermal_class = {
* @devdata: device private data.
* @ops: standard thermal cooling devices callbacks.
*/
-struct thermal_cooling_device *thermal_cooling_device_register(
- char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
+struct thermal_cooling_device *
+thermal_cooling_device_register(char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
{
struct thermal_cooling_device *cdev;
struct thermal_zone_device *pos;
@@ -974,12 +955,11 @@ struct thermal_cooling_device *thermal_cooling_device_register(
if (!result)
return cdev;
- unregister:
+unregister:
release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
device_unregister(&cdev->device);
return ERR_PTR(result);
}
-
EXPORT_SYMBOL(thermal_cooling_device_register);
/**
@@ -1024,7 +1004,6 @@ void thermal_cooling_device_unregister(struct
device_unregister(&cdev->device);
return;
}
-
EXPORT_SYMBOL(thermal_cooling_device_unregister);
/**
@@ -1044,8 +1023,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
if (tz->ops->get_temp(tz, &temp)) {
/* get_temp failed - retry it later */
- printk(KERN_WARNING PREFIX "failed to read out thermal zone "
- "%d\n", tz->id);
+ pr_warn("failed to read out thermal zone %d\n", tz->id);
goto leave;
}
@@ -1060,9 +1038,8 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
ret = tz->ops->notify(tz, count,
trip_type);
if (!ret) {
- printk(KERN_EMERG
- "Critical temperature reached (%ld C), shutting down.\n",
- temp/1000);
+ pr_emerg("Critical temperature reached (%ld C), shutting down\n",
+ temp/1000);
orderly_poweroff(true);
}
}
@@ -1100,7 +1077,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
tz->last_temperature = temp;
- leave:
+leave:
if (tz->passive)
thermal_zone_device_set_polling(tz, tz->passive_delay);
else if (tz->polling_delay)
@@ -1199,7 +1176,12 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
}
for (count = 0; count < trips; count++) {
- TRIP_POINT_ATTR_ADD(&tz->device, count, result);
+ result = device_create_file(&tz->device,
+ &trip_point_attrs[count * 2]);
+ if (result)
+ break;
+ result = device_create_file(&tz->device,
+ &trip_point_attrs[count * 2 + 1]);
if (result)
goto unregister;
tz->ops->get_trip_type(tz, count, &trip_type);
@@ -1235,12 +1217,11 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
if (!result)
return tz;
- unregister:
+unregister:
release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
device_unregister(&tz->device);
return ERR_PTR(result);
}
-
EXPORT_SYMBOL(thermal_zone_device_register);
/**
@@ -1279,9 +1260,12 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
if (tz->ops->get_mode)
device_remove_file(&tz->device, &dev_attr_mode);
- for (count = 0; count < tz->trips; count++)
- TRIP_POINT_ATTR_REMOVE(&tz->device, count);
-
+ for (count = 0; count < tz->trips; count++) {
+ device_remove_file(&tz->device,
+ &trip_point_attrs[count * 2]);
+ device_remove_file(&tz->device,
+ &trip_point_attrs[count * 2 + 1]);
+ }
thermal_remove_hwmon_sysfs(tz);
release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
idr_destroy(&tz->idr);
@@ -1289,7 +1273,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
device_unregister(&tz->device);
return;
}
-
EXPORT_SYMBOL(thermal_zone_device_unregister);
#ifdef CONFIG_NET
@@ -1312,10 +1295,11 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
void *msg_header;
int size;
int result;
+ static unsigned int thermal_event_seqnum;
/* allocate memory */
- size = nla_total_size(sizeof(struct thermal_genl_event)) + \
- nla_total_size(0);
+ size = nla_total_size(sizeof(struct thermal_genl_event)) +
+ nla_total_size(0);
skb = genlmsg_new(size, GFP_ATOMIC);
if (!skb)
@@ -1331,8 +1315,8 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
}
/* fill the data */
- attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, \
- sizeof(struct thermal_genl_event));
+ attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT,
+ sizeof(struct thermal_genl_event));
if (!attr) {
nlmsg_free(skb);
@@ -1359,7 +1343,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
if (result)
- printk(KERN_INFO "failed to send netlink event:%d", result);
+ pr_info("failed to send netlink event:%d\n", result);
return result;
}
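
The mode_store() hunk above trims the comparison length to sizeof("enabled") - 1 (and likewise for "disabled"). With the full sizeof(), the terminating NUL takes part in the comparison, so a sysfs write that arrives with a trailing newline -- which is what a plain `echo enabled > mode` produces -- can never match. A small userspace demonstration of the difference (buffer contents chosen purely for illustration):

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *buf = "enabled\n";          /* typical sysfs store buffer */

        /* sizeof("enabled") == 8: the NUL is compared against '\n', no match */
        printf("with sizeof():     %d\n",
               strncmp(buf, "enabled", sizeof("enabled")) == 0);

        /* sizeof("enabled") - 1 == 7: only the visible characters are compared */
        printf("with sizeof() - 1: %d\n",
               strncmp(buf, "enabled", sizeof("enabled") - 1) == 0);
        return 0;
}
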
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 794ecb40017c..e1235accab74 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -102,7 +102,7 @@
* You can find the original tools for this direct from Multitech
* ftp://ftp.multitech.com/ISI-Cards/
*
- * Having installed the cards the module options (/etc/modprobe.conf)
+ * Having installed the cards the module options (/etc/modprobe.d/)
*
* options isicom io=card1,card2,card3,card4 irq=card1,card2,card3,card4
*
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 20d795d9b591..0c65c9e66986 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -51,6 +51,7 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
+#include <linux/types.h>
#include <asm/io.h>
#include <asm/sizes.h>
@@ -271,6 +272,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.direction = DMA_MEM_TO_DEV,
.dst_maxburst = uap->fifosize >> 1,
+ .device_fc = false,
};
struct dma_chan *chan;
dma_cap_mask_t mask;
@@ -304,6 +306,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.direction = DMA_DEV_TO_MEM,
.src_maxburst = uap->fifosize >> 1,
+ .device_fc = false,
};
chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
@@ -481,7 +484,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
return -EBUSY;
}
- desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
+ desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
@@ -664,7 +667,6 @@ static void pl011_dma_rx_callback(void *data);
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
struct dma_chan *rxchan = uap->dmarx.chan;
- struct dma_device *dma_dev;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_async_tx_descriptor *desc;
struct pl011_sgbuf *sgbuf;
@@ -675,8 +677,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
/* Start the RX DMA job */
sgbuf = uap->dmarx.use_buf_b ?
&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
- dma_dev = rxchan->device;
- desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+ desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/*
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 332f2eb8abbc..e825460478be 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -844,7 +844,7 @@ static int dma_handle_rx(struct eg20t_port *priv)
sg_dma_address(sg) = priv->rx_buf_dma;
- desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx,
+ desc = dmaengine_prep_slave_sg(priv->chan_rx,
sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -1003,7 +1003,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
sg_dma_len(sg) = size;
}
- desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx,
+ desc = dmaengine_prep_slave_sg(priv->chan_tx,
priv->sg_tx_p, nent, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 61b7fd2729cd..bf461cf99616 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1229,17 +1229,20 @@ static void sci_dma_tx_complete(void *arg)
port->icount.tx += sg_dma_len(&s->sg_tx);
async_tx_ack(s->desc_tx);
- s->cookie_tx = -EINVAL;
s->desc_tx = NULL;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (!uart_circ_empty(xmit)) {
+ s->cookie_tx = 0;
schedule_work(&s->work_tx);
- } else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
- u16 ctrl = sci_in(port, SCSCR);
- sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
+ } else {
+ s->cookie_tx = -EINVAL;
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ u16 ctrl = sci_in(port, SCSCR);
+ sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
+ }
}
spin_unlock_irqrestore(&port->lock, flags);
@@ -1338,7 +1341,7 @@ static void sci_submit_rx(struct sci_port *s)
struct scatterlist *sg = &s->sg_rx[i];
struct dma_async_tx_descriptor *desc;
- desc = chan->device->device_prep_slave_sg(chan,
+ desc = dmaengine_prep_slave_sg(chan,
sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (desc) {
@@ -1453,7 +1456,7 @@ static void work_fn_tx(struct work_struct *work)
BUG_ON(!sg_dma_len(sg));
- desc = chan->device->device_prep_slave_sg(chan,
+ desc = dmaengine_prep_slave_sg(chan,
sg, s->sg_len_tx, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
@@ -1501,8 +1504,10 @@ static void sci_start_tx(struct uart_port *port)
}
if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
- s->cookie_tx < 0)
+ s->cookie_tx < 0) {
+ s->cookie_tx = 0;
schedule_work(&s->work_tx);
+ }
#endif
if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index 97cb45916c43..d05c7fbbb703 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -115,12 +115,12 @@ static bool ux500_configure_channel(struct dma_channel *channel,
slave_conf.dst_addr = usb_fifo_addr;
slave_conf.dst_addr_width = addr_width;
slave_conf.dst_maxburst = 16;
+ slave_conf.device_fc = false;
dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,
(unsigned long) &slave_conf);
- dma_desc = dma_chan->device->
- device_prep_slave_sg(dma_chan, &sg, 1, direction,
+ dma_desc = dmaengine_prep_slave_sg(dma_chan, &sg, 1, direction,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!dma_desc)
return false;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 3648c82a17fe..6ec7f838d7fa 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -786,9 +786,8 @@ static void xfer_work(struct work_struct *work)
sg_dma_address(&sg) = pkt->dma + pkt->actual;
sg_dma_len(&sg) = pkt->trans;
- desc = chan->device->device_prep_slave_sg(chan, &sg, 1, dir,
- DMA_PREP_INTERRUPT |
- DMA_CTRL_ACK);
+ desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
return;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7c229d304684..ff8605b4b4be 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1724,7 +1724,8 @@ static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv)
/*
* Module parameter to control latency timer for NDI FTDI-based USB devices.
- * If this value is not set in modprobe.conf.local its value will be set to 1ms.
+ * If this value is not set in /etc/modprobe.d/ its value will be set
+ * to 1ms.
*/
static int ndi_latency_timer = 1;
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index fe2d803a6347..7691c866637b 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -222,7 +222,7 @@ config USB_LIBUSUAL
for usb-storage and ub drivers, and allows to switch binding
of these devices without rebuilding modules.
- Typical syntax of /etc/modprobe.conf is:
+ Typical syntax of /etc/modprobe.d/*conf is:
options libusual bias="ub"
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 727a5149d818..eec0d7b748eb 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -337,7 +337,7 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
/* This enables the channel */
if (mx3_fbi->cookie < 0) {
- mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
+ mx3_fbi->txd = dmaengine_prep_slave_sg(dma_chan,
&mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!mx3_fbi->txd) {
dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
@@ -1091,7 +1091,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
if (mx3_fbi->txd)
async_tx_ack(mx3_fbi->txd);
- txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg +
+ txd = dmaengine_prep_slave_sg(dma_chan, sg +
mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!txd) {
dev_err(fbi->device,
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c
index fd2271600370..4e5b960c32c8 100644
--- a/drivers/video/omap2/vrfb.c
+++ b/drivers/video/omap2/vrfb.c
@@ -27,7 +27,6 @@
#include <linux/bitops.h>
#include <linux/mutex.h>
-#include <mach/io.h>
#include <plat/vrfb.h>
#include <plat/sdrc.h>
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index d54e04df45e4..54984deb8561 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -28,6 +28,7 @@
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/timex.h>
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 85f1fcdb30e7..9dacb8586701 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -230,7 +230,7 @@ static void autofs_dev_ioctl_fd_install(unsigned int fd, struct file *file)
fdt = files_fdtable(files);
BUG_ON(fdt->fd[fd] != NULL);
rcu_assign_pointer(fdt->fd[fd], file);
- FD_SET(fd, fdt->close_on_exec);
+ __set_close_on_exec(fd, fdt);
spin_unlock(&files->file_lock);
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 7d7ff206cdcb..48ffb3dc610a 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1415,6 +1415,22 @@ static void do_thread_regset_writeback(struct task_struct *task,
regset->writeback(task, regset, 1);
}
+#ifndef PR_REG_SIZE
+#define PR_REG_SIZE(S) sizeof(S)
+#endif
+
+#ifndef PRSTATUS_SIZE
+#define PRSTATUS_SIZE(S) sizeof(S)
+#endif
+
+#ifndef PR_REG_PTR
+#define PR_REG_PTR(S) (&((S)->pr_reg))
+#endif
+
+#ifndef SET_PR_FPVALID
+#define SET_PR_FPVALID(S, V) ((S)->pr_fpvalid = (V))
+#endif
+
static int fill_thread_core_info(struct elf_thread_core_info *t,
const struct user_regset_view *view,
long signr, size_t *total)
@@ -1429,11 +1445,11 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
*/
fill_prstatus(&t->prstatus, t->task, signr);
(void) view->regsets[0].get(t->task, &view->regsets[0],
- 0, sizeof(t->prstatus.pr_reg),
- &t->prstatus.pr_reg, NULL);
+ 0, PR_REG_SIZE(t->prstatus.pr_reg),
+ PR_REG_PTR(&t->prstatus), NULL);
fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
- sizeof(t->prstatus), &t->prstatus);
+ PRSTATUS_SIZE(t->prstatus), &t->prstatus);
*total += notesize(&t->notes[0]);
do_thread_regset_writeback(t->task, &view->regsets[0]);
@@ -1463,7 +1479,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
regset->core_note_type,
size, data);
else {
- t->prstatus.pr_fpvalid = 1;
+ SET_PR_FPVALID(&t->prstatus, 1);
fill_note(&t->notes[i], "CORE",
NT_PRFPREG, size, data);
}
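
The PR_REG_SIZE/PRSTATUS_SIZE/PR_REG_PTR/SET_PR_FPVALID blocks added above use the usual overridable-default idiom: an architecture that needs a different core-dump layout can define the macro in a header included earlier, and fill_thread_core_info() transparently picks up either that override or the generic sizeof()/direct-member fallback. A minimal sketch of the idiom in isolation (the names and the commented-out override are invented for illustration):

#include <stdio.h>

/* an arch header included before this point could have provided, say:
 * #define REG_AREA_SIZE(s)     ((s)->compat ? 64 : sizeof((s)->regs))
 */

#ifndef REG_AREA_SIZE
#define REG_AREA_SIZE(s)        sizeof((s)->regs)       /* generic default */
#endif

struct state { long regs[16]; };

int main(void)
{
        struct state st;

        printf("%zu\n", REG_AREA_SIZE(&st));    /* 128 with 8-byte longs */
        return 0;
}
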
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 0cc20b35c1c4..42704149b723 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -171,11 +171,11 @@ out:
spin_unlock_irqrestore(&workers->lock, flags);
}
-static noinline int run_ordered_completions(struct btrfs_workers *workers,
+static noinline void run_ordered_completions(struct btrfs_workers *workers,
struct btrfs_work *work)
{
if (!workers->ordered)
- return 0;
+ return;
set_bit(WORK_DONE_BIT, &work->flags);
@@ -213,7 +213,6 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
}
spin_unlock(&workers->order_lock);
- return 0;
}
static void put_worker(struct btrfs_worker_thread *worker)
@@ -399,7 +398,7 @@ again:
/*
* this will wait for all the worker threads to shutdown
*/
-int btrfs_stop_workers(struct btrfs_workers *workers)
+void btrfs_stop_workers(struct btrfs_workers *workers)
{
struct list_head *cur;
struct btrfs_worker_thread *worker;
@@ -427,7 +426,6 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
put_worker(worker);
}
spin_unlock_irq(&workers->lock);
- return 0;
}
/*
@@ -615,14 +613,14 @@ found:
* it was taken from. It is intended for use with long running work functions
* that make some progress and want to give the cpu up for others.
*/
-int btrfs_requeue_work(struct btrfs_work *work)
+void btrfs_requeue_work(struct btrfs_work *work)
{
struct btrfs_worker_thread *worker = work->worker;
unsigned long flags;
int wake = 0;
if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
- goto out;
+ return;
spin_lock_irqsave(&worker->lock, flags);
if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
@@ -649,9 +647,6 @@ int btrfs_requeue_work(struct btrfs_work *work)
if (wake)
wake_up_process(worker->task);
spin_unlock_irqrestore(&worker->lock, flags);
-out:
-
- return 0;
}
void btrfs_set_work_high_prio(struct btrfs_work *work)
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index f34cc31fa3c9..063698b90ce2 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -111,9 +111,9 @@ struct btrfs_workers {
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
int btrfs_start_workers(struct btrfs_workers *workers);
-int btrfs_stop_workers(struct btrfs_workers *workers);
+void btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
struct btrfs_workers *async_starter);
-int btrfs_requeue_work(struct btrfs_work *work);
+void btrfs_requeue_work(struct btrfs_work *work);
void btrfs_set_work_high_prio(struct btrfs_work *work);
#endif
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 0436c12da8c2..f4e90748940a 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -116,6 +116,7 @@ add_parent:
* to a logical address
*/
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ int search_commit_root,
struct __prelim_ref *ref,
struct ulist *parents)
{
@@ -131,6 +132,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+ path->search_commit_root = !!search_commit_root;
root_key.objectid = ref->root_id;
root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -188,6 +190,7 @@ out:
* resolve all indirect backrefs from the list
*/
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
+ int search_commit_root,
struct list_head *head)
{
int err;
@@ -212,7 +215,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
continue;
if (ref->count == 0)
continue;
- err = __resolve_indirect_ref(fs_info, ref, parents);
+ err = __resolve_indirect_ref(fs_info, search_commit_root,
+ ref, parents);
if (err) {
if (ret == 0)
ret = err;
@@ -586,6 +590,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head;
int info_level = 0;
int ret;
+ int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
struct list_head prefs_delayed;
struct list_head prefs;
struct __prelim_ref *ref;
@@ -600,6 +605,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+ path->search_commit_root = !!search_commit_root;
/*
* grab both a lock on the path and a lock on the delayed ref head.
@@ -614,35 +620,39 @@ again:
goto out;
BUG_ON(ret == 0);
- /*
- * look if there are updates for this ref queued and lock the head
- */
- delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
- head = btrfs_find_delayed_ref_head(trans, bytenr);
- if (head) {
- if (!mutex_trylock(&head->mutex)) {
- atomic_inc(&head->node.refs);
- spin_unlock(&delayed_refs->lock);
-
- btrfs_release_path(path);
-
- /*
- * Mutex was contended, block until it's
- * released and try again
- */
- mutex_lock(&head->mutex);
- mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
- goto again;
- }
- ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
- if (ret) {
- spin_unlock(&delayed_refs->lock);
- goto out;
+ if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
+ /*
+ * look if there are updates for this ref queued and lock the
+ * head
+ */
+ delayed_refs = &trans->transaction->delayed_refs;
+ spin_lock(&delayed_refs->lock);
+ head = btrfs_find_delayed_ref_head(trans, bytenr);
+ if (head) {
+ if (!mutex_trylock(&head->mutex)) {
+ atomic_inc(&head->node.refs);
+ spin_unlock(&delayed_refs->lock);
+
+ btrfs_release_path(path);
+
+ /*
+ * Mutex was contended, block until it's
+ * released and try again
+ */
+ mutex_lock(&head->mutex);
+ mutex_unlock(&head->mutex);
+ btrfs_put_delayed_ref(&head->node);
+ goto again;
+ }
+ ret = __add_delayed_refs(head, seq, &info_key,
+ &prefs_delayed);
+ if (ret) {
+ spin_unlock(&delayed_refs->lock);
+ goto out;
+ }
}
+ spin_unlock(&delayed_refs->lock);
}
- spin_unlock(&delayed_refs->lock);
if (path->slots[0]) {
struct extent_buffer *leaf;
@@ -679,7 +689,7 @@ again:
if (ret)
goto out;
- ret = __resolve_indirect_refs(fs_info, &prefs);
+ ret = __resolve_indirect_refs(fs_info, search_commit_root, &prefs);
if (ret)
goto out;
@@ -1074,8 +1084,7 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
return 0;
}
-static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path, u64 logical,
+static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, u64 logical,
u64 orig_extent_item_objectid,
u64 extent_item_pos, u64 root,
iterate_extent_inodes_t *iterate, void *ctx)
@@ -1143,35 +1152,38 @@ static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
* calls iterate() for every inode that references the extent identified by
* the given parameters.
* when the iterator function returns a non-zero value, iteration stops.
- * path is guaranteed to be in released state when iterate() is called.
*/
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
u64 extent_item_objectid, u64 extent_item_pos,
+ int search_commit_root,
iterate_extent_inodes_t *iterate, void *ctx)
{
int ret;
struct list_head data_refs = LIST_HEAD_INIT(data_refs);
struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
struct btrfs_trans_handle *trans;
- struct ulist *refs;
- struct ulist *roots;
+ struct ulist *refs = NULL;
+ struct ulist *roots = NULL;
struct ulist_node *ref_node = NULL;
struct ulist_node *root_node = NULL;
struct seq_list seq_elem;
- struct btrfs_delayed_ref_root *delayed_refs;
-
- trans = btrfs_join_transaction(fs_info->extent_root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ struct btrfs_delayed_ref_root *delayed_refs = NULL;
pr_debug("resolving all inodes for extent %llu\n",
extent_item_objectid);
- delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
- btrfs_get_delayed_seq(delayed_refs, &seq_elem);
- spin_unlock(&delayed_refs->lock);
+ if (search_commit_root) {
+ trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
+ } else {
+ trans = btrfs_join_transaction(fs_info->extent_root);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ delayed_refs = &trans->transaction->delayed_refs;
+ spin_lock(&delayed_refs->lock);
+ btrfs_get_delayed_seq(delayed_refs, &seq_elem);
+ spin_unlock(&delayed_refs->lock);
+ }
ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
extent_item_pos, seq_elem.seq,
@@ -1188,7 +1200,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
while (!ret && (root_node = ulist_next(roots, root_node))) {
pr_debug("root %llu references leaf %llu\n",
root_node->val, ref_node->val);
- ret = iterate_leaf_refs(fs_info, path, ref_node->val,
+ ret = iterate_leaf_refs(fs_info, ref_node->val,
extent_item_objectid,
extent_item_pos, root_node->val,
iterate, ctx);
@@ -1198,8 +1210,11 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
ulist_free(refs);
ulist_free(roots);
out:
- btrfs_put_delayed_seq(delayed_refs, &seq_elem);
- btrfs_end_transaction(trans, fs_info->extent_root);
+ if (!search_commit_root) {
+ btrfs_put_delayed_seq(delayed_refs, &seq_elem);
+ btrfs_end_transaction(trans, fs_info->extent_root);
+ }
+
return ret;
}
@@ -1210,6 +1225,7 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
int ret;
u64 extent_item_pos;
struct btrfs_key found_key;
+ int search_commit_root = path->search_commit_root;
ret = extent_from_logical(fs_info, logical, path,
&found_key);
@@ -1220,8 +1236,9 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
return ret;
extent_item_pos = logical - found_key.objectid;
- ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
- extent_item_pos, iterate, ctx);
+ ret = iterate_extent_inodes(fs_info, found_key.objectid,
+ extent_item_pos, search_commit_root,
+ iterate, ctx);
return ret;
}
@@ -1342,12 +1359,6 @@ int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
inode_to_path, ipath);
}
-/*
- * allocates space to return multiple file system paths for an inode.
- * total_bytes to allocate are passed, note that space usable for actual path
- * information will be total_bytes - sizeof(struct inode_fs_paths).
- * the returned pointer must be freed with free_ipath() in the end.
- */
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
struct btrfs_data_container *data;
@@ -1403,5 +1414,6 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
void free_ipath(struct inode_fs_paths *ipath)
{
+ kfree(ipath->fspath);
kfree(ipath);
}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index d00dfa9ca934..57ea2e959e4d 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -22,6 +22,8 @@
#include "ioctl.h"
#include "ulist.h"
+#define BTRFS_BACKREF_SEARCH_COMMIT_ROOT ((struct btrfs_trans_handle *)0)
+
struct inode_fs_paths {
struct btrfs_path *btrfs_path;
struct btrfs_root *fs_root;
@@ -44,9 +46,8 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
u64 *out_root, u8 *out_level);
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
u64 extent_item_objectid,
- u64 extent_offset,
+ u64 extent_offset, int search_commit_root,
iterate_extent_inodes_t *iterate, void *ctx);
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
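
BTRFS_BACKREF_SEARCH_COMMIT_ROOT, defined above as a NULL cast to struct btrfs_trans_handle *, is a sentinel that callers pass to mean "walk the committed root only"; find_parent_nodes() and iterate_extent_inodes() test for it before touching trans->transaction or the delayed-ref machinery. A compact sketch of the same sentinel-pointer idiom (types, names and messages invented for illustration):

#include <stdio.h>

struct txn { int id; };

/* sentinel: "no live transaction, operate on committed state only" */
#define SEARCH_COMMITTED        ((struct txn *)0)

static void lookup(struct txn *trans)
{
        if (trans == SEARCH_COMMITTED) {
                puts("searching committed state, skipping delayed refs");
                return;
        }
        printf("searching inside transaction %d\n", trans->id);
}

int main(void)
{
        struct txn t = { .id = 42 };

        lookup(&t);
        lookup(SEARCH_COMMITTED);
        return 0;
}
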
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b805afb37fa8..d286b40a5671 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -226,8 +226,8 @@ out:
* Clear the writeback bits on all of the file
* pages for a compressed write
*/
-static noinline int end_compressed_writeback(struct inode *inode, u64 start,
- unsigned long ram_size)
+static noinline void end_compressed_writeback(struct inode *inode, u64 start,
+ unsigned long ram_size)
{
unsigned long index = start >> PAGE_CACHE_SHIFT;
unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
@@ -253,7 +253,6 @@ static noinline int end_compressed_writeback(struct inode *inode, u64 start,
index += ret;
}
/* the inode may be gone now */
- return 0;
}
/*
@@ -392,16 +391,16 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
*/
atomic_inc(&cb->pending_bios);
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
ret = btrfs_csum_one_bio(root, inode, bio,
start, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
bio_put(bio);
@@ -421,15 +420,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_get(bio);
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
bio_put(bio);
return 0;
@@ -497,7 +496,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
* sure they map to this compressed extent on disk.
*/
set_page_extent_mapped(page);
- lock_extent(tree, last_offset, end, GFP_NOFS);
+ lock_extent(tree, last_offset, end);
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, last_offset,
PAGE_CACHE_SIZE);
@@ -507,7 +506,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
(last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
(em->block_start >> 9) != cb->orig_bio->bi_sector) {
free_extent_map(em);
- unlock_extent(tree, last_offset, end, GFP_NOFS);
+ unlock_extent(tree, last_offset, end);
unlock_page(page);
page_cache_release(page);
break;
@@ -535,7 +534,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
nr_pages++;
page_cache_release(page);
} else {
- unlock_extent(tree, last_offset, end, GFP_NOFS);
+ unlock_extent(tree, last_offset, end);
unlock_page(page);
page_cache_release(page);
break;
@@ -662,7 +661,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio_get(comp_bio);
ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
/*
* inc the count before we submit the bio so
@@ -675,14 +674,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_lookup_bio_sums(root, inode,
comp_bio, sums);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
sums += (comp_bio->bi_size + root->sectorsize - 1) /
root->sectorsize;
ret = btrfs_map_bio(root, READ, comp_bio,
mirror_num, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
bio_put(comp_bio);
@@ -698,15 +697,15 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio_get(comp_bio);
ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
bio_put(comp_bio);
return 0;
@@ -734,7 +733,7 @@ struct btrfs_compress_op *btrfs_compress_op[] = {
&btrfs_lzo_compress,
};
-int __init btrfs_init_compress(void)
+void __init btrfs_init_compress(void)
{
int i;
@@ -744,7 +743,6 @@ int __init btrfs_init_compress(void)
atomic_set(&comp_alloc_workspace[i], 0);
init_waitqueue_head(&comp_workspace_wait[i]);
}
- return 0;
}
/*
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index a12059f4f0fd..9afb0a62ae82 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -19,7 +19,7 @@
#ifndef __BTRFS_COMPRESSION_
#define __BTRFS_COMPRESSION_
-int btrfs_init_compress(void);
+void btrfs_init_compress(void);
void btrfs_exit_compress(void);
int btrfs_compress_pages(int type, struct address_space *mapping,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0639a555e16e..e801f226d7e0 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -36,7 +36,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *dst_buf,
struct extent_buffer *src_buf);
-static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
struct btrfs_path *btrfs_alloc_path(void)
@@ -156,10 +156,23 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
struct extent_buffer *eb;
- rcu_read_lock();
- eb = rcu_dereference(root->node);
- extent_buffer_get(eb);
- rcu_read_unlock();
+ while (1) {
+ rcu_read_lock();
+ eb = rcu_dereference(root->node);
+
+ /*
+ * RCU really hurts here, we could free up the root node because
+ * it was cow'ed but we may not get the new root node yet so do
+ * the inc_not_zero dance and if it doesn't work then
+ * synchronize_rcu and try again.
+ */
+ if (atomic_inc_not_zero(&eb->refs)) {
+ rcu_read_unlock();
+ break;
+ }
+ rcu_read_unlock();
+ synchronize_rcu();
+ }
return eb;
}
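
The reworked btrfs_root_node() above is the standard RCU take-a-reference-only-if-still-live pattern: dereference the pointer under rcu_read_lock(), attempt atomic_inc_not_zero() on the buffer's refcount, and on failure (the buffer is already on its way to being freed) wait out a grace period with synchronize_rcu() and retry. A userspace sketch of just the inc-not-zero step using C11 atomics, with the RCU parts elided and all names invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct buf { atomic_int refs; };

/* take a reference only if the object still has at least one holder */
static bool get_ref_not_zero(struct buf *b)
{
        int old = atomic_load(&b->refs);

        while (old != 0) {
                /* on failure, 'old' is reloaded and the zero check repeats */
                if (atomic_compare_exchange_weak(&b->refs, &old, old + 1))
                        return true;    /* we now hold our own reference */
        }
        return false;                   /* object is dying; caller retries */
}

int main(void)
{
        struct buf live = { .refs = 1 };
        struct buf dead = { .refs = 0 };

        printf("live: %d, dead: %d\n",
               get_ref_not_zero(&live), get_ref_not_zero(&dead));
        return 0;
}
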
@@ -331,8 +344,13 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
if (btrfs_block_can_be_shared(root, buf)) {
ret = btrfs_lookup_extent_info(trans, root, buf->start,
buf->len, &refs, &flags);
- BUG_ON(ret);
- BUG_ON(refs == 0);
+ if (ret)
+ return ret;
+ if (refs == 0) {
+ ret = -EROFS;
+ btrfs_std_error(root->fs_info, ret);
+ return ret;
+ }
} else {
refs = 1;
if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
@@ -351,14 +369,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
ret = btrfs_inc_ref(trans, root, buf, 1, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
if (root->root_key.objectid ==
BTRFS_TREE_RELOC_OBJECTID) {
ret = btrfs_dec_ref(trans, root, buf, 0, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
ret = btrfs_inc_ref(trans, root, cow, 1, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
} else {
@@ -368,14 +386,15 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
ret = btrfs_inc_ref(trans, root, cow, 1, 1);
else
ret = btrfs_inc_ref(trans, root, cow, 0, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
if (new_flags != 0) {
ret = btrfs_set_disk_extent_flags(trans, root,
buf->start,
buf->len,
new_flags, 0);
- BUG_ON(ret);
+ if (ret)
+ return ret;
}
} else {
if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
@@ -384,9 +403,9 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
ret = btrfs_inc_ref(trans, root, cow, 1, 1);
else
ret = btrfs_inc_ref(trans, root, cow, 0, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
ret = btrfs_dec_ref(trans, root, buf, 1, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
clean_tree_block(trans, root, buf);
*last_ref = 1;
@@ -415,7 +434,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
{
struct btrfs_disk_key disk_key;
struct extent_buffer *cow;
- int level;
+ int level, ret;
int last_ref = 0;
int unlock_orig = 0;
u64 parent_start;
@@ -467,7 +486,11 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
(unsigned long)btrfs_header_fsid(cow),
BTRFS_FSID_SIZE);
- update_ref_for_cow(trans, root, buf, cow, &last_ref);
+ ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ return ret;
+ }
if (root->ref_cows)
btrfs_reloc_cow_block(trans, root, buf, cow);
@@ -504,7 +527,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
}
if (unlock_orig)
btrfs_tree_unlock(buf);
- free_extent_buffer(buf);
+ free_extent_buffer_stale(buf);
btrfs_mark_buffer_dirty(cow);
*cow_ret = cow;
return 0;
@@ -934,7 +957,12 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* promote the child to a root */
child = read_node_slot(root, mid, 0);
- BUG_ON(!child);
+ if (!child) {
+ ret = -EROFS;
+ btrfs_std_error(root->fs_info, ret);
+ goto enospc;
+ }
+
btrfs_tree_lock(child);
btrfs_set_lock_blocking(child);
ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
@@ -959,7 +987,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
root_sub_used(root, mid->len);
btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
/* once for the root ptr */
- free_extent_buffer(mid);
+ free_extent_buffer_stale(mid);
return 0;
}
if (btrfs_header_nritems(mid) >
@@ -1010,13 +1038,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (btrfs_header_nritems(right) == 0) {
clean_tree_block(trans, root, right);
btrfs_tree_unlock(right);
- wret = del_ptr(trans, root, path, level + 1, pslot +
- 1);
- if (wret)
- ret = wret;
+ del_ptr(trans, root, path, level + 1, pslot + 1);
root_sub_used(root, right->len);
btrfs_free_tree_block(trans, root, right, 0, 1, 0);
- free_extent_buffer(right);
+ free_extent_buffer_stale(right);
right = NULL;
} else {
struct btrfs_disk_key right_key;
@@ -1035,7 +1060,11 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
* otherwise we would have pulled some pointers from the
* right
*/
- BUG_ON(!left);
+ if (!left) {
+ ret = -EROFS;
+ btrfs_std_error(root->fs_info, ret);
+ goto enospc;
+ }
wret = balance_node_right(trans, root, mid, left);
if (wret < 0) {
ret = wret;
@@ -1051,12 +1080,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (btrfs_header_nritems(mid) == 0) {
clean_tree_block(trans, root, mid);
btrfs_tree_unlock(mid);
- wret = del_ptr(trans, root, path, level + 1, pslot);
- if (wret)
- ret = wret;
+ del_ptr(trans, root, path, level + 1, pslot);
root_sub_used(root, mid->len);
btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
- free_extent_buffer(mid);
+ free_extent_buffer_stale(mid);
mid = NULL;
} else {
/* update the parent key to reflect our changes */
@@ -1382,7 +1409,8 @@ static noinline int reada_for_balance(struct btrfs_root *root,
* if lowest_unlock is 1, level 0 won't be unlocked
*/
static noinline void unlock_up(struct btrfs_path *path, int level,
- int lowest_unlock)
+ int lowest_unlock, int min_write_lock_level,
+ int *write_lock_level)
{
int i;
int skip_level = level;
@@ -1414,6 +1442,11 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
btrfs_tree_unlock_rw(t, path->locks[i]);
path->locks[i] = 0;
+ if (write_lock_level &&
+ i > min_write_lock_level &&
+ i <= *write_lock_level) {
+ *write_lock_level = i - 1;
+ }
}
}
}
@@ -1637,6 +1670,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
/* everything at write_lock_level or lower must be write locked */
int write_lock_level = 0;
u8 lowest_level = 0;
+ int min_write_lock_level;
lowest_level = p->lowest_level;
WARN_ON(lowest_level && ins_len > 0);
@@ -1664,6 +1698,8 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
if (cow && (p->keep_locks || p->lowest_level))
write_lock_level = BTRFS_MAX_LEVEL;
+ min_write_lock_level = write_lock_level;
+
again:
/*
* we try very hard to do read locks on the root
@@ -1795,7 +1831,8 @@ cow_done:
goto again;
}
- unlock_up(p, level, lowest_unlock);
+ unlock_up(p, level, lowest_unlock,
+ min_write_lock_level, &write_lock_level);
if (level == lowest_level) {
if (dec)
@@ -1857,7 +1894,8 @@ cow_done:
}
}
if (!p->search_for_split)
- unlock_up(p, level, lowest_unlock);
+ unlock_up(p, level, lowest_unlock,
+ min_write_lock_level, &write_lock_level);
goto done;
}
}
@@ -1881,15 +1919,12 @@ done:
* fixing up pointers when a given leaf/node is not in slot 0 of the
* higher levels
*
- * If this fails to write a tree block, it returns -1, but continues
- * fixing up the blocks in ram so the tree is consistent.
*/
-static int fixup_low_keys(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_disk_key *key, int level)
+static void fixup_low_keys(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_disk_key *key, int level)
{
int i;
- int ret = 0;
struct extent_buffer *t;
for (i = level; i < BTRFS_MAX_LEVEL; i++) {
@@ -1902,7 +1937,6 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans,
if (tslot != 0)
break;
}
- return ret;
}
/*
@@ -1911,9 +1945,9 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans,
* This function isn't completely safe. It's the caller's responsibility
* that the new key won't break the order
*/
-int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *new_key)
+void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *new_key)
{
struct btrfs_disk_key disk_key;
struct extent_buffer *eb;
@@ -1923,13 +1957,11 @@ int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
slot = path->slots[0];
if (slot > 0) {
btrfs_item_key(eb, &disk_key, slot - 1);
- if (comp_keys(&disk_key, new_key) >= 0)
- return -1;
+ BUG_ON(comp_keys(&disk_key, new_key) >= 0);
}
if (slot < btrfs_header_nritems(eb) - 1) {
btrfs_item_key(eb, &disk_key, slot + 1);
- if (comp_keys(&disk_key, new_key) <= 0)
- return -1;
+ BUG_ON(comp_keys(&disk_key, new_key) <= 0);
}
btrfs_cpu_key_to_disk(&disk_key, new_key);
@@ -1937,7 +1969,6 @@ int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(eb);
if (slot == 0)
fixup_low_keys(trans, root, path, &disk_key, 1);
- return 0;
}
/*
@@ -2140,12 +2171,11 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
*
* slot and level indicate where you want the key to go, and
* blocknr is the block the key points to.
- *
- * returns zero on success and < 0 on any error
*/
-static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
- *root, struct btrfs_path *path, struct btrfs_disk_key
- *key, u64 bytenr, int slot, int level)
+static void insert_ptr(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_disk_key *key, u64 bytenr,
+ int slot, int level)
{
struct extent_buffer *lower;
int nritems;
@@ -2155,8 +2185,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
lower = path->nodes[level];
nritems = btrfs_header_nritems(lower);
BUG_ON(slot > nritems);
- if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
- BUG();
+ BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
if (slot != nritems) {
memmove_extent_buffer(lower,
btrfs_node_key_ptr_offset(slot + 1),
@@ -2169,7 +2198,6 @@ static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_set_node_ptr_generation(lower, slot, trans->transid);
btrfs_set_header_nritems(lower, nritems + 1);
btrfs_mark_buffer_dirty(lower);
- return 0;
}
/*
@@ -2190,7 +2218,6 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
struct btrfs_disk_key disk_key;
int mid;
int ret;
- int wret;
u32 c_nritems;
c = path->nodes[level];
@@ -2247,11 +2274,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(c);
btrfs_mark_buffer_dirty(split);
- wret = insert_ptr(trans, root, path, &disk_key, split->start,
- path->slots[level + 1] + 1,
- level + 1);
- if (wret)
- ret = wret;
+ insert_ptr(trans, root, path, &disk_key, split->start,
+ path->slots[level + 1] + 1, level + 1);
if (path->slots[level] >= mid) {
path->slots[level] -= mid;
@@ -2320,6 +2344,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
{
struct extent_buffer *left = path->nodes[0];
struct extent_buffer *upper = path->nodes[1];
+ struct btrfs_map_token token;
struct btrfs_disk_key disk_key;
int slot;
u32 i;
@@ -2331,6 +2356,8 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
u32 data_end;
u32 this_item_size;
+ btrfs_init_map_token(&token);
+
if (empty)
nr = 0;
else
@@ -2408,8 +2435,8 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
push_space = BTRFS_LEAF_DATA_SIZE(root);
for (i = 0; i < right_nritems; i++) {
item = btrfs_item_nr(right, i);
- push_space -= btrfs_item_size(right, item);
- btrfs_set_item_offset(right, item, push_space);
+ push_space -= btrfs_token_item_size(right, item, &token);
+ btrfs_set_token_item_offset(right, item, push_space, &token);
}
left_nritems -= push_items;
@@ -2537,9 +2564,11 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
u32 old_left_nritems;
u32 nr;
int ret = 0;
- int wret;
u32 this_item_size;
u32 old_left_item_size;
+ struct btrfs_map_token token;
+
+ btrfs_init_map_token(&token);
if (empty)
nr = min(right_nritems, max_slot);
@@ -2600,9 +2629,10 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
item = btrfs_item_nr(left, i);
- ioff = btrfs_item_offset(left, item);
- btrfs_set_item_offset(left, item,
- ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
+ ioff = btrfs_token_item_offset(left, item, &token);
+ btrfs_set_token_item_offset(left, item,
+ ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
+ &token);
}
btrfs_set_header_nritems(left, old_left_nritems + push_items);
@@ -2632,8 +2662,9 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
for (i = 0; i < right_nritems; i++) {
item = btrfs_item_nr(right, i);
- push_space = push_space - btrfs_item_size(right, item);
- btrfs_set_item_offset(right, item, push_space);
+ push_space = push_space - btrfs_token_item_size(right,
+ item, &token);
+ btrfs_set_token_item_offset(right, item, push_space, &token);
}
btrfs_mark_buffer_dirty(left);
@@ -2643,9 +2674,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
clean_tree_block(trans, root, right);
btrfs_item_key(right, &disk_key, 0);
- wret = fixup_low_keys(trans, root, path, &disk_key, 1);
- if (wret)
- ret = wret;
+ fixup_low_keys(trans, root, path, &disk_key, 1);
/* then fixup the leaf pointer in the path */
if (path->slots[0] < push_items) {
@@ -2716,7 +2745,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
path->nodes[1], slot - 1, &left);
if (ret) {
/* we hit -ENOSPC, but it isn't fatal here */
- ret = 1;
+ if (ret == -ENOSPC)
+ ret = 1;
goto out;
}
@@ -2738,22 +2768,21 @@ out:
/*
* split the path's leaf in two, making sure there is at least data_size
* available for the resulting leaf level of the path.
- *
- * returns 0 if all went well and < 0 on failure.
*/
-static noinline int copy_for_split(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct extent_buffer *l,
- struct extent_buffer *right,
- int slot, int mid, int nritems)
+static noinline void copy_for_split(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct extent_buffer *l,
+ struct extent_buffer *right,
+ int slot, int mid, int nritems)
{
int data_copy_size;
int rt_data_off;
int i;
- int ret = 0;
- int wret;
struct btrfs_disk_key disk_key;
+ struct btrfs_map_token token;
+
+ btrfs_init_map_token(&token);
nritems = nritems - mid;
btrfs_set_header_nritems(right, nritems);
@@ -2775,17 +2804,15 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
struct btrfs_item *item = btrfs_item_nr(right, i);
u32 ioff;
- ioff = btrfs_item_offset(right, item);
- btrfs_set_item_offset(right, item, ioff + rt_data_off);
+ ioff = btrfs_token_item_offset(right, item, &token);
+ btrfs_set_token_item_offset(right, item,
+ ioff + rt_data_off, &token);
}
btrfs_set_header_nritems(l, mid);
- ret = 0;
btrfs_item_key(right, &disk_key, 0);
- wret = insert_ptr(trans, root, path, &disk_key, right->start,
- path->slots[1] + 1, 1);
- if (wret)
- ret = wret;
+ insert_ptr(trans, root, path, &disk_key, right->start,
+ path->slots[1] + 1, 1);
btrfs_mark_buffer_dirty(right);
btrfs_mark_buffer_dirty(l);
@@ -2803,8 +2830,6 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
}
BUG_ON(path->slots[0] < 0);
-
- return ret;
}
/*
@@ -2993,12 +3018,8 @@ again:
if (split == 0) {
if (mid <= slot) {
btrfs_set_header_nritems(right, 0);
- wret = insert_ptr(trans, root, path,
- &disk_key, right->start,
- path->slots[1] + 1, 1);
- if (wret)
- ret = wret;
-
+ insert_ptr(trans, root, path, &disk_key, right->start,
+ path->slots[1] + 1, 1);
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
@@ -3006,29 +3027,21 @@ again:
path->slots[1] += 1;
} else {
btrfs_set_header_nritems(right, 0);
- wret = insert_ptr(trans, root, path,
- &disk_key,
- right->start,
+ insert_ptr(trans, root, path, &disk_key, right->start,
path->slots[1], 1);
- if (wret)
- ret = wret;
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
path->slots[0] = 0;
- if (path->slots[1] == 0) {
- wret = fixup_low_keys(trans, root,
- path, &disk_key, 1);
- if (wret)
- ret = wret;
- }
+ if (path->slots[1] == 0)
+ fixup_low_keys(trans, root, path,
+ &disk_key, 1);
}
btrfs_mark_buffer_dirty(right);
return ret;
}
- ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
- BUG_ON(ret);
+ copy_for_split(trans, root, path, l, right, slot, mid, nritems);
if (split == 2) {
BUG_ON(num_doubles != 0);
@@ -3036,7 +3049,7 @@ again:
goto again;
}
- return ret;
+ return 0;
push_for_double:
push_for_double_split(trans, root, path, data_size);
@@ -3238,11 +3251,9 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
return ret;
path->slots[0]++;
- ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
- item_size, item_size +
- sizeof(struct btrfs_item), 1);
- BUG_ON(ret);
-
+ setup_items_for_insert(trans, root, path, new_key, &item_size,
+ item_size, item_size +
+ sizeof(struct btrfs_item), 1);
leaf = path->nodes[0];
memcpy_extent_buffer(leaf,
btrfs_item_ptr_offset(leaf, path->slots[0]),
@@ -3257,10 +3268,10 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
* off the end of the item or if we shift the item to chop bytes off
* the front.
*/
-int btrfs_truncate_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- u32 new_size, int from_end)
+void btrfs_truncate_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ u32 new_size, int from_end)
{
int slot;
struct extent_buffer *leaf;
@@ -3271,13 +3282,16 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
unsigned int old_size;
unsigned int size_diff;
int i;
+ struct btrfs_map_token token;
+
+ btrfs_init_map_token(&token);
leaf = path->nodes[0];
slot = path->slots[0];
old_size = btrfs_item_size_nr(leaf, slot);
if (old_size == new_size)
- return 0;
+ return;
nritems = btrfs_header_nritems(leaf);
data_end = leaf_data_end(root, leaf);
@@ -3297,8 +3311,9 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
u32 ioff;
item = btrfs_item_nr(leaf, i);
- ioff = btrfs_item_offset(leaf, item);
- btrfs_set_item_offset(leaf, item, ioff + size_diff);
+ ioff = btrfs_token_item_offset(leaf, item, &token);
+ btrfs_set_token_item_offset(leaf, item,
+ ioff + size_diff, &token);
}
/* shift the data */
@@ -3350,15 +3365,14 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
btrfs_print_leaf(root, leaf);
BUG();
}
- return 0;
}
/*
* make the item pointed to by the path bigger, data_size is the new size.
*/
-int btrfs_extend_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- u32 data_size)
+void btrfs_extend_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ u32 data_size)
{
int slot;
struct extent_buffer *leaf;
@@ -3368,6 +3382,9 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
unsigned int old_data;
unsigned int old_size;
int i;
+ struct btrfs_map_token token;
+
+ btrfs_init_map_token(&token);
leaf = path->nodes[0];
@@ -3397,8 +3414,9 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
u32 ioff;
item = btrfs_item_nr(leaf, i);
- ioff = btrfs_item_offset(leaf, item);
- btrfs_set_item_offset(leaf, item, ioff - data_size);
+ ioff = btrfs_token_item_offset(leaf, item, &token);
+ btrfs_set_token_item_offset(leaf, item,
+ ioff - data_size, &token);
}
/* shift the data */
@@ -3416,7 +3434,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
btrfs_print_leaf(root, leaf);
BUG();
}
- return 0;
}
/*
@@ -3441,6 +3458,9 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
unsigned int data_end;
struct btrfs_disk_key disk_key;
struct btrfs_key found_key;
+ struct btrfs_map_token token;
+
+ btrfs_init_map_token(&token);
for (i = 0; i < nr; i++) {
if (total_size + data_size[i] + sizeof(struct btrfs_item) >
@@ -3506,8 +3526,9 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
u32 ioff;
item = btrfs_item_nr(leaf, i);
- ioff = btrfs_item_offset(leaf, item);
- btrfs_set_item_offset(leaf, item, ioff - total_data);
+ ioff = btrfs_token_item_offset(leaf, item, &token);
+ btrfs_set_token_item_offset(leaf, item,
+ ioff - total_data, &token);
}
/* shift the items */
memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
@@ -3534,9 +3555,10 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
btrfs_set_item_key(leaf, &disk_key, slot + i);
item = btrfs_item_nr(leaf, slot + i);
- btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
+ btrfs_set_token_item_offset(leaf, item,
+ data_end - data_size[i], &token);
data_end -= data_size[i];
- btrfs_set_item_size(leaf, item, data_size[i]);
+ btrfs_set_token_item_size(leaf, item, data_size[i], &token);
}
btrfs_set_header_nritems(leaf, nritems + nr);
btrfs_mark_buffer_dirty(leaf);
@@ -3544,7 +3566,7 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
ret = 0;
if (slot == 0) {
btrfs_cpu_key_to_disk(&disk_key, cpu_key);
- ret = fixup_low_keys(trans, root, path, &disk_key, 1);
+ fixup_low_keys(trans, root, path, &disk_key, 1);
}
if (btrfs_leaf_free_space(root, leaf) < 0) {
@@ -3562,19 +3584,21 @@ out:
* to save stack depth by doing the bulk of the work in a function
* that doesn't call btrfs_search_slot
*/
-int setup_items_for_insert(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *cpu_key, u32 *data_size,
- u32 total_data, u32 total_size, int nr)
+void setup_items_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *cpu_key, u32 *data_size,
+ u32 total_data, u32 total_size, int nr)
{
struct btrfs_item *item;
int i;
u32 nritems;
unsigned int data_end;
struct btrfs_disk_key disk_key;
- int ret;
struct extent_buffer *leaf;
int slot;
+ struct btrfs_map_token token;
+
+ btrfs_init_map_token(&token);
leaf = path->nodes[0];
slot = path->slots[0];
@@ -3606,8 +3630,9 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,
u32 ioff;
item = btrfs_item_nr(leaf, i);
- ioff = btrfs_item_offset(leaf, item);
- btrfs_set_item_offset(leaf, item, ioff - total_data);
+ ioff = btrfs_token_item_offset(leaf, item, &token);
+ btrfs_set_token_item_offset(leaf, item,
+ ioff - total_data, &token);
}
/* shift the items */
memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
@@ -3626,17 +3651,17 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,
btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
btrfs_set_item_key(leaf, &disk_key, slot + i);
item = btrfs_item_nr(leaf, slot + i);
- btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
+ btrfs_set_token_item_offset(leaf, item,
+ data_end - data_size[i], &token);
data_end -= data_size[i];
- btrfs_set_item_size(leaf, item, data_size[i]);
+ btrfs_set_token_item_size(leaf, item, data_size[i], &token);
}
btrfs_set_header_nritems(leaf, nritems + nr);
- ret = 0;
if (slot == 0) {
btrfs_cpu_key_to_disk(&disk_key, cpu_key);
- ret = fixup_low_keys(trans, root, path, &disk_key, 1);
+ fixup_low_keys(trans, root, path, &disk_key, 1);
}
btrfs_unlock_up_safe(path, 1);
btrfs_mark_buffer_dirty(leaf);
@@ -3645,7 +3670,6 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,
btrfs_print_leaf(root, leaf);
BUG();
}
- return ret;
}
/*
@@ -3672,16 +3696,14 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
if (ret == 0)
return -EEXIST;
if (ret < 0)
- goto out;
+ return ret;
slot = path->slots[0];
BUG_ON(slot < 0);
- ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
+ setup_items_for_insert(trans, root, path, cpu_key, data_size,
total_data, total_size, nr);
-
-out:
- return ret;
+ return 0;
}
/*
@@ -3717,13 +3739,11 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
* the tree should have been previously balanced so the deletion does not
* empty a node.
*/
-static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct btrfs_path *path, int level, int slot)
+static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_path *path, int level, int slot)
{
struct extent_buffer *parent = path->nodes[level];
u32 nritems;
- int ret = 0;
- int wret;
nritems = btrfs_header_nritems(parent);
if (slot != nritems - 1) {
@@ -3743,12 +3763,9 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_disk_key disk_key;
btrfs_node_key(parent, &disk_key, 0);
- wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
- if (wret)
- ret = wret;
+ fixup_low_keys(trans, root, path, &disk_key, level + 1);
}
btrfs_mark_buffer_dirty(parent);
- return ret;
}
/*
@@ -3761,17 +3778,13 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
* The path must have already been setup for deleting the leaf, including
* all the proper balancing. path->nodes[1] must be locked.
*/
-static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct extent_buffer *leaf)
+static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct extent_buffer *leaf)
{
- int ret;
-
WARN_ON(btrfs_header_generation(leaf) != trans->transid);
- ret = del_ptr(trans, root, path, 1, path->slots[1]);
- if (ret)
- return ret;
+ del_ptr(trans, root, path, 1, path->slots[1]);
/*
* btrfs_free_extent is expensive, we want to make sure we
@@ -3781,8 +3794,9 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
root_sub_used(root, leaf->len);
+ extent_buffer_get(leaf);
btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
- return 0;
+ free_extent_buffer_stale(leaf);
}
/*
* delete the item at the leaf level in path. If that empties
@@ -3799,6 +3813,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int wret;
int i;
u32 nritems;
+ struct btrfs_map_token token;
+
+ btrfs_init_map_token(&token);
leaf = path->nodes[0];
last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
@@ -3820,8 +3837,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
u32 ioff;
item = btrfs_item_nr(leaf, i);
- ioff = btrfs_item_offset(leaf, item);
- btrfs_set_item_offset(leaf, item, ioff + dsize);
+ ioff = btrfs_token_item_offset(leaf, item, &token);
+ btrfs_set_token_item_offset(leaf, item,
+ ioff + dsize, &token);
}
memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
@@ -3839,8 +3857,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
} else {
btrfs_set_path_blocking(path);
clean_tree_block(trans, root, leaf);
- ret = btrfs_del_leaf(trans, root, path, leaf);
- BUG_ON(ret);
+ btrfs_del_leaf(trans, root, path, leaf);
}
} else {
int used = leaf_space_used(leaf, 0, nritems);
@@ -3848,10 +3865,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_disk_key disk_key;
btrfs_item_key(leaf, &disk_key, 0);
- wret = fixup_low_keys(trans, root, path,
- &disk_key, 1);
- if (wret)
- ret = wret;
+ fixup_low_keys(trans, root, path, &disk_key, 1);
}
/* delete the leaf if it is mostly empty */
@@ -3879,9 +3893,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (btrfs_header_nritems(leaf) == 0) {
path->slots[1] = slot;
- ret = btrfs_del_leaf(trans, root, path, leaf);
- BUG_ON(ret);
+ btrfs_del_leaf(trans, root, path, leaf);
free_extent_buffer(leaf);
+ ret = 0;
} else {
/* if we're still in the path, make sure
* we're dirty. Otherwise, one of the
@@ -4059,18 +4073,18 @@ find_next_key:
path->slots[level] = slot;
if (level == path->lowest_level) {
ret = 0;
- unlock_up(path, level, 1);
+ unlock_up(path, level, 1, 0, NULL);
goto out;
}
btrfs_set_path_blocking(path);
cur = read_node_slot(root, cur, slot);
- BUG_ON(!cur);
+ BUG_ON(!cur); /* -ENOMEM */
btrfs_tree_read_lock(cur);
path->locks[level - 1] = BTRFS_READ_LOCK;
path->nodes[level - 1] = cur;
- unlock_up(path, level, 1);
+ unlock_up(path, level, 1, 0, NULL);
btrfs_clear_path_blocking(path, NULL, 0);
}
out:
@@ -4306,7 +4320,7 @@ again:
}
ret = 0;
done:
- unlock_up(path, 0, 1);
+ unlock_up(path, 0, 1, 0, NULL);
path->leave_spinning = old_spinning;
if (!old_spinning)
btrfs_set_path_blocking(path);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 80b6486fd5e6..5b8ef8eb3521 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -48,6 +48,8 @@ struct btrfs_ordered_sum;
#define BTRFS_MAGIC "_BHRfS_M"
+#define BTRFS_MAX_MIRRORS 2
+
#define BTRFS_MAX_LEVEL 8
#define BTRFS_COMPAT_EXTENT_TREE_V0
@@ -138,6 +140,12 @@ struct btrfs_ordered_sum;
#define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2
/*
+ * the max metadata block size. This limit is somewhat artificial,
+ * but the memmove costs go through the roof for larger blocks.
+ */
+#define BTRFS_MAX_METADATA_BLOCKSIZE 65536
+
+/*
* we can actually store much bigger names, but lets not confuse the rest
* of linux
*/
@@ -461,6 +469,19 @@ struct btrfs_super_block {
#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2)
#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3)
+/*
+ * some patches floated around with a second compression method;
+ * let's save that incompat here for when they do get in.
+ * Note we don't actually support it, we're just reserving the
+ * number.
+ */
+#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4)
+
+/*
+ * older kernels tried to do bigger metadata blocks, but the
+ * code was pretty buggy. Let's not let them try anymore.
+ */
+#define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5)
#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL
@@ -468,6 +489,7 @@ struct btrfs_super_block {
(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
+ BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO)
/*
@@ -829,6 +851,21 @@ struct btrfs_csum_item {
*/
#define BTRFS_AVAIL_ALLOC_BIT_SINGLE (1ULL << 48)
+#define BTRFS_EXTENDED_PROFILE_MASK (BTRFS_BLOCK_GROUP_PROFILE_MASK | \
+ BTRFS_AVAIL_ALLOC_BIT_SINGLE)
+
+static inline u64 chunk_to_extended(u64 flags)
+{
+ if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)
+ flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+ return flags;
+}
+static inline u64 extended_to_chunk(u64 flags)
+{
+ return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+}
+
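For illustration, a minimal sketch of the two helpers above (profile_round_trip_example() is a made-up name): on-disk chunk flags encode the single-copy profile as zero profile bits, chunk_to_extended() makes that explicit with BTRFS_AVAIL_ALLOC_BIT_SINGLE, and extended_to_chunk() masks the synthetic bit off again, so valid on-disk flags round-trip unchanged.

static void profile_round_trip_example(u64 chunk_flags)
{
	u64 extended = chunk_to_extended(chunk_flags);

	/* a chunk with no profile bits set is reported as "single" */
	if (extended & BTRFS_AVAIL_ALLOC_BIT_SINGLE)
		pr_debug("single profile\n");

	/* dropping the synthetic bit recovers the on-disk flags */
	WARN_ON(extended_to_chunk(extended) != chunk_flags);
}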
struct btrfs_block_group_item {
__le64 used;
__le64 chunk_objectid;
@@ -1503,6 +1540,7 @@ struct btrfs_ioctl_defrag_range_args {
#define BTRFS_MOUNT_SKIP_BALANCE (1 << 19)
#define BTRFS_MOUNT_CHECK_INTEGRITY (1 << 20)
#define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
+#define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 22)
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -1526,6 +1564,17 @@ struct btrfs_ioctl_defrag_range_args {
#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31)
+struct btrfs_map_token {
+ struct extent_buffer *eb;
+ char *kaddr;
+ unsigned long offset;
+};
+
+static inline void btrfs_init_map_token(struct btrfs_map_token *token)
+{
+ memset(token, 0, sizeof(*token));
+}
+
/* some macros to generate set/get funcs for the struct fields. This
* assumes there is a lefoo_to_cpu for every type, so lets make a simple
* one for u8:
@@ -1549,20 +1598,22 @@ struct btrfs_ioctl_defrag_range_args {
#ifndef BTRFS_SETGET_FUNCS
#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
u##bits btrfs_##name(struct extent_buffer *eb, type *s); \
+u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, struct btrfs_map_token *token); \
+void btrfs_set_token_##name(struct extent_buffer *eb, type *s, u##bits val, struct btrfs_map_token *token);\
void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);
#endif
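As a rough sketch of the token pattern the ctree.c hunks above switch to (shift_item_offsets() is a made-up wrapper, not a real function): a caller initializes one btrfs_map_token and threads it through every accessor that touches the same leaf, so the mapping cached in the token can be reused across the whole loop instead of being re-established per field.

static void shift_item_offsets(struct extent_buffer *leaf,
			       int first, int nritems, u32 shift)
{
	struct btrfs_map_token token;
	int i;

	btrfs_init_map_token(&token);
	for (i = first; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(leaf, i);
		u32 ioff = btrfs_token_item_offset(leaf, item, &token);

		/* the same token is reused for every item in the leaf */
		btrfs_set_token_item_offset(leaf, item, ioff + shift, &token);
	}
}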
#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
static inline u##bits btrfs_##name(struct extent_buffer *eb) \
{ \
- type *p = page_address(eb->first_page); \
+ type *p = page_address(eb->pages[0]); \
u##bits res = le##bits##_to_cpu(p->member); \
return res; \
} \
static inline void btrfs_set_##name(struct extent_buffer *eb, \
u##bits val) \
{ \
- type *p = page_address(eb->first_page); \
+ type *p = page_address(eb->pages[0]); \
p->member = cpu_to_le##bits(val); \
}
@@ -2466,8 +2517,7 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 num_bytes, u64 min_alloc_size,
u64 empty_size, u64 hint_byte,
- u64 search_end, struct btrfs_key *ins,
- u64 data);
+ struct btrfs_key *ins, u64 data);
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *buf, int full_backref, int for_cow);
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -2484,8 +2534,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
u64 start, u64 len);
-int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
@@ -2548,8 +2598,8 @@ void btrfs_block_rsv_release(struct btrfs_root *root,
u64 num_bytes);
int btrfs_set_block_group_ro(struct btrfs_root *root,
struct btrfs_block_group_cache *cache);
-int btrfs_set_block_group_rw(struct btrfs_root *root,
- struct btrfs_block_group_cache *cache);
+void btrfs_set_block_group_rw(struct btrfs_root *root,
+ struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
int btrfs_error_unpin_extent_range(struct btrfs_root *root,
@@ -2568,9 +2618,9 @@ int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2);
int btrfs_previous_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid,
int type);
-int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *new_key);
+void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
@@ -2590,12 +2640,13 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
struct extent_buffer **cow_ret, u64 new_root_objectid);
int btrfs_block_can_be_shared(struct btrfs_root *root,
struct extent_buffer *buf);
-int btrfs_extend_item(struct btrfs_trans_handle *trans, struct btrfs_root
- *root, struct btrfs_path *path, u32 data_size);
-int btrfs_truncate_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- u32 new_size, int from_end);
+void btrfs_extend_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ u32 data_size);
+void btrfs_truncate_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
@@ -2629,10 +2680,10 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
return btrfs_del_items(trans, root, path, path->slots[0], 1);
}
-int setup_items_for_insert(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *cpu_key, u32 *data_size,
- u32 total_data, u32 total_size, int nr);
+void setup_items_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *cpu_key, u32 *data_size,
+ u32 total_data, u32 total_size, int nr);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
@@ -2659,9 +2710,9 @@ static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
}
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
-void btrfs_drop_snapshot(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv, int update_ref,
- int for_reloc);
+int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ int update_ref, int for_reloc);
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *node,
@@ -2687,24 +2738,6 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
kfree(fs_info->super_for_commit);
kfree(fs_info);
}
-/**
- * profile_is_valid - tests whether a given profile is valid and reduced
- * @flags: profile to validate
- * @extended: if true @flags is treated as an extended profile
- */
-static inline int profile_is_valid(u64 flags, int extended)
-{
- u64 mask = ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
- flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
- if (extended)
- mask &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-
- if (flags & mask)
- return 0;
- /* true if zero or exactly one bit set */
- return (flags & (~flags + 1)) == flags;
-}
/* root-item.c */
int btrfs_find_root_ref(struct btrfs_root *tree_root,
@@ -2723,9 +2756,10 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *key, struct btrfs_root_item
*item);
-int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
- *root, struct btrfs_key *key, struct btrfs_root_item
- *item);
+int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_key *key,
+ struct btrfs_root_item *item);
int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
btrfs_root_item *item, struct btrfs_key *key);
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
@@ -2909,7 +2943,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root);
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
-int btrfs_invalidate_inodes(struct btrfs_root *root);
+void btrfs_invalidate_inodes(struct btrfs_root *root);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_root *root);
int btrfs_prealloc_file_range(struct inode *inode, int mode,
@@ -2961,13 +2995,41 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
/* super.c */
int btrfs_parse_options(struct btrfs_root *root, char *options);
int btrfs_sync_fs(struct super_block *sb, int wait);
+void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...);
void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
- unsigned int line, int errno);
+ unsigned int line, int errno, const char *fmt, ...);
+
+void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, const char *function,
+ unsigned int line, int errno);
+
+#define btrfs_abort_transaction(trans, root, errno) \
+do { \
+ __btrfs_abort_transaction(trans, root, __func__, \
+ __LINE__, errno); \
+} while (0)
#define btrfs_std_error(fs_info, errno) \
do { \
if ((errno)) \
- __btrfs_std_error((fs_info), __func__, __LINE__, (errno));\
+ __btrfs_std_error((fs_info), __func__, \
+ __LINE__, (errno), NULL); \
+} while (0)
+
+#define btrfs_error(fs_info, errno, fmt, args...) \
+do { \
+ __btrfs_std_error((fs_info), __func__, __LINE__, \
+ (errno), fmt, ##args); \
+} while (0)
+
+void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
+ unsigned int line, int errno, const char *fmt, ...);
+
+#define btrfs_panic(fs_info, errno, fmt, args...) \
+do { \
+ struct btrfs_fs_info *_i = (fs_info); \
+ __btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args); \
+ BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)); \
} while (0)
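A hedged sketch of the calling convention these helpers establish (do_tree_op() and do_fs_op() are placeholders, not real functions): an error hit inside a running transaction aborts it and unwinds, as the __btrfs_cow_block hunk above does, while an error with no transaction context is reported through the btrfs_std_error()/btrfs_error() path instead.

static int example_error_handling(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_fs_info *fs_info)
{
	int ret;

	ret = do_tree_op(trans, root);		/* placeholder call */
	if (ret) {
		/* inside a transaction: mark it aborted and unwind */
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	ret = do_fs_op(fs_info);		/* placeholder call */
	if (ret) {
		/* no transaction context: use the std_error path */
		btrfs_error(fs_info, ret, "placeholder failure %d", ret);
		return ret;
	}
	return 0;
}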
/* acl.c */
@@ -3003,16 +3065,17 @@ void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending,
u64 *bytes_to_reserve);
-void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
+int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending);
/* scrub.c */
int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
struct btrfs_scrub_progress *progress, int readonly);
-int btrfs_scrub_pause(struct btrfs_root *root);
-int btrfs_scrub_pause_super(struct btrfs_root *root);
-int btrfs_scrub_continue(struct btrfs_root *root);
-int btrfs_scrub_continue_super(struct btrfs_root *root);
+void btrfs_scrub_pause(struct btrfs_root *root);
+void btrfs_scrub_pause_super(struct btrfs_root *root);
+void btrfs_scrub_continue(struct btrfs_root *root);
+void btrfs_scrub_continue_super(struct btrfs_root *root);
+int __btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel(struct btrfs_root *root);
int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev);
int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index fe4cd0f1cef1..03e3748d84d0 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -115,6 +115,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
return NULL;
}
+/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
struct inode *inode)
{
@@ -836,10 +837,8 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
btrfs_clear_path_blocking(path, NULL, 0);
/* insert the keys of the items */
- ret = setup_items_for_insert(trans, root, path, keys, data_size,
- total_data_size, total_size, nitems);
- if (ret)
- goto error;
+ setup_items_for_insert(trans, root, path, keys, data_size,
+ total_data_size, total_size, nitems);
/* insert the dir index items */
slot = path->slots[0];
@@ -1108,16 +1107,25 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
return 0;
}
-/* Called when committing the transaction. */
+/*
+ * Called when committing the transaction.
+ * Returns 0 on success.
+ * Returns < 0 on error; in that case the transaction is aborted and any
+ * outstanding delayed items are cleaned up.
+ */
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ struct btrfs_root *curr_root = root;
struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
struct btrfs_path *path;
struct btrfs_block_rsv *block_rsv;
int ret = 0;
+ if (trans->aborted)
+ return -EIO;
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -1130,17 +1138,18 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
curr_node = btrfs_first_delayed_node(delayed_root);
while (curr_node) {
- root = curr_node->root;
- ret = btrfs_insert_delayed_items(trans, path, root,
+ curr_root = curr_node->root;
+ ret = btrfs_insert_delayed_items(trans, path, curr_root,
curr_node);
if (!ret)
- ret = btrfs_delete_delayed_items(trans, path, root,
- curr_node);
+ ret = btrfs_delete_delayed_items(trans, path,
+ curr_root, curr_node);
if (!ret)
- ret = btrfs_update_delayed_inode(trans, root, path,
- curr_node);
+ ret = btrfs_update_delayed_inode(trans, curr_root,
+ path, curr_node);
if (ret) {
btrfs_release_delayed_node(curr_node);
+ btrfs_abort_transaction(trans, root, ret);
break;
}
@@ -1151,6 +1160,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
btrfs_free_path(path);
trans->block_rsv = block_rsv;
+
return ret;
}
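Per the updated comment on btrfs_run_delayed_items(), a caller only has to propagate the error; the transaction has already been aborted on its behalf. A minimal, hypothetical commit-path fragment:

	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		/* the transaction was already aborted for us */
		return ret;
	}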
@@ -1371,6 +1381,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
btrfs_wq_run_delayed_node(delayed_root, root, 0);
}
+/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
int name_len, struct inode *dir,
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 66e4f29505a3..69f22e3ab3bc 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -420,7 +420,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
* this does all the dirty work in terms of maintaining the correct
* overall modification count.
*/
-static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *ref,
u64 bytenr, u64 num_bytes,
@@ -487,20 +487,19 @@ static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
* we've updated the existing ref, free the newly
* allocated ref
*/
- kfree(ref);
+ kfree(head_ref);
} else {
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
delayed_refs->num_entries++;
trans->delayed_ref_updates++;
}
- return 0;
}
/*
* helper to insert a delayed tree ref into the rbtree.
*/
-static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *ref,
u64 bytenr, u64 num_bytes, u64 parent,
@@ -549,18 +548,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
* we've updated the existing ref, free the newly
* allocated ref
*/
- kfree(ref);
+ kfree(full_ref);
} else {
delayed_refs->num_entries++;
trans->delayed_ref_updates++;
}
- return 0;
}
/*
* helper to insert a delayed data ref into the rbtree.
*/
-static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *ref,
u64 bytenr, u64 num_bytes, u64 parent,
@@ -611,12 +609,11 @@ static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
* we've updated the existing ref, free the newly
* allocated ref
*/
- kfree(ref);
+ kfree(full_ref);
} else {
delayed_refs->num_entries++;
trans->delayed_ref_updates++;
}
- return 0;
}
/*
@@ -634,7 +631,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_tree_ref *ref;
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
- int ret;
BUG_ON(extent_op && extent_op->is_data);
ref = kmalloc(sizeof(*ref), GFP_NOFS);
@@ -656,14 +652,12 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
* insert both the head node and the new ref without dropping
* the spin lock
*/
- ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+ add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
num_bytes, action, 0);
- BUG_ON(ret);
- ret = add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
+ add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
num_bytes, parent, ref_root, level, action,
for_cow);
- BUG_ON(ret);
if (!need_ref_seq(for_cow, ref_root) &&
waitqueue_active(&delayed_refs->seq_wait))
wake_up(&delayed_refs->seq_wait);
@@ -685,7 +679,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_data_ref *ref;
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
- int ret;
BUG_ON(extent_op && !extent_op->is_data);
ref = kmalloc(sizeof(*ref), GFP_NOFS);
@@ -707,14 +700,12 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
* insert both the head node and the new ref without dropping
* the spin lock
*/
- ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+ add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
num_bytes, action, 1);
- BUG_ON(ret);
- ret = add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
+ add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
num_bytes, parent, ref_root, owner, offset,
action, for_cow);
- BUG_ON(ret);
if (!need_ref_seq(for_cow, ref_root) &&
waitqueue_active(&delayed_refs->seq_wait))
wake_up(&delayed_refs->seq_wait);
@@ -729,7 +720,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
{
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
- int ret;
head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
if (!head_ref)
@@ -740,10 +730,9 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+ add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
extent_op->is_data);
- BUG_ON(ret);
if (waitqueue_active(&delayed_refs->seq_wait))
wake_up(&delayed_refs->seq_wait);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 31d84e78129b..c1a074d0696f 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -49,9 +49,8 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
di = btrfs_match_dir_item_name(root, path, name, name_len);
if (di)
return ERR_PTR(-EEXIST);
- ret = btrfs_extend_item(trans, root, path, data_size);
- }
- if (ret < 0)
+ btrfs_extend_item(trans, root, path, data_size);
+ } else if (ret < 0)
return ERR_PTR(ret);
WARN_ON(ret > 0);
leaf = path->nodes[0];
@@ -116,6 +115,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
* 'location' is the key to stuff into the directory item, 'type' is the
* type of the inode we're pointing to, and 'index' is the sequence number
* to use for the second index (if one is created).
+ * Will return 0 or -ENOMEM
*/
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
*root, const char *name, int name_len,
@@ -383,8 +383,8 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_len - (ptr + sub_item_len - start));
- ret = btrfs_truncate_item(trans, root, path,
- item_len - sub_item_len, 1);
+ btrfs_truncate_item(trans, root, path,
+ item_len - sub_item_len, 1);
}
return ret;
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 534266fe505f..20196f411206 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -48,20 +48,19 @@
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
-static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int read_only);
-static int btrfs_destroy_ordered_operations(struct btrfs_root *root);
-static int btrfs_destroy_ordered_extents(struct btrfs_root *root);
+static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
+static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_root *root);
-static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
-static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
+static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
+static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages,
int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
struct extent_io_tree *pinned_extents);
-static int btrfs_cleanup_transaction(struct btrfs_root *root);
/*
* end_io_wq structs are used to do processing in task context when an IO is
@@ -99,6 +98,7 @@ struct async_submit_bio {
*/
u64 bio_offset;
struct btrfs_work work;
+ int error;
};
/*
@@ -332,8 +332,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
return 0;
lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
- 0, &cached_state, GFP_NOFS);
- if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
+ 0, &cached_state);
+ if (extent_buffer_uptodate(eb) &&
btrfs_header_generation(eb) == parent_transid) {
ret = 0;
goto out;
@@ -344,7 +344,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
(unsigned long long)parent_transid,
(unsigned long long)btrfs_header_generation(eb));
ret = 1;
- clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
+ clear_extent_buffer_uptodate(eb);
out:
unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
&cached_state, GFP_NOFS);
@@ -360,9 +360,11 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
u64 start, u64 parent_transid)
{
struct extent_io_tree *io_tree;
+ int failed = 0;
int ret;
int num_copies = 0;
int mirror_num = 0;
+ int failed_mirror = 0;
clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
@@ -370,9 +372,8 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
ret = read_extent_buffer_pages(io_tree, eb, start,
WAIT_COMPLETE,
btree_get_extent, mirror_num);
- if (!ret &&
- !verify_parent_transid(io_tree, eb, parent_transid))
- return ret;
+ if (!ret && !verify_parent_transid(io_tree, eb, parent_transid))
+ break;
/*
* This buffer's crc is fine, but its contents are corrupted, so
@@ -380,18 +381,31 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
* any less wrong.
*/
if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
- return ret;
+ break;
+
+ if (!failed_mirror) {
+ failed = 1;
+ printk(KERN_ERR "failed mirror was %d\n", eb->failed_mirror);
+ failed_mirror = eb->failed_mirror;
+ }
num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
eb->start, eb->len);
if (num_copies == 1)
- return ret;
+ break;
mirror_num++;
+ if (mirror_num == failed_mirror)
+ mirror_num++;
+
if (mirror_num > num_copies)
- return ret;
+ break;
}
- return -EIO;
+
+ if (failed && !ret)
+ repair_eb_io_failure(root, eb, failed_mirror);
+
+ return ret;
}
/*
@@ -404,50 +418,27 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
struct extent_io_tree *tree;
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 found_start;
- unsigned long len;
struct extent_buffer *eb;
- int ret;
tree = &BTRFS_I(page->mapping->host)->io_tree;
- if (page->private == EXTENT_PAGE_PRIVATE) {
- WARN_ON(1);
- goto out;
- }
- if (!page->private) {
- WARN_ON(1);
- goto out;
- }
- len = page->private >> 2;
- WARN_ON(len == 0);
-
- eb = alloc_extent_buffer(tree, start, len, page);
- if (eb == NULL) {
- WARN_ON(1);
- goto out;
- }
- ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
- btrfs_header_generation(eb));
- BUG_ON(ret);
- WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));
-
+ eb = (struct extent_buffer *)page->private;
+ if (page != eb->pages[0])
+ return 0;
found_start = btrfs_header_bytenr(eb);
if (found_start != start) {
WARN_ON(1);
- goto err;
+ return 0;
}
- if (eb->first_page != page) {
+ if (eb->pages[0] != page) {
WARN_ON(1);
- goto err;
+ return 0;
}
if (!PageUptodate(page)) {
WARN_ON(1);
- goto err;
+ return 0;
}
csum_tree_block(root, eb, 0);
-err:
- free_extent_buffer(eb);
-out:
return 0;
}
@@ -537,34 +528,74 @@ static noinline int check_leaf(struct btrfs_root *root,
return 0;
}
+struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
+ struct page *page, int max_walk)
+{
+ struct extent_buffer *eb;
+ u64 start = page_offset(page);
+ u64 target = start;
+ u64 min_start;
+
+ if (start < max_walk)
+ min_start = 0;
+ else
+ min_start = start - max_walk;
+
+ while (start >= min_start) {
+ eb = find_extent_buffer(tree, start, 0);
+ if (eb) {
+ /*
+ * we found an extent buffer and it contains our page
+ * horray!
+			 * hooray!
+ if (eb->start <= target &&
+ eb->start + eb->len > target)
+ return eb;
+
+ /* we found an extent buffer that wasn't for us */
+ free_extent_buffer(eb);
+ return NULL;
+ }
+ if (start == 0)
+ break;
+ start -= PAGE_CACHE_SIZE;
+ }
+ return NULL;
+}
+
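A hypothetical caller of the new helper above, for illustration only (page_backed_by_eb() and the 16-page walk limit are invented here): find_eb_for_page() returns a referenced buffer when the page belongs to one, so the caller is expected to drop that reference.

static int page_backed_by_eb(struct extent_io_tree *tree, struct page *page)
{
	struct extent_buffer *eb;

	/* walk back at most 16 pages looking for the owning buffer */
	eb = find_eb_for_page(tree, page, 16 * PAGE_CACHE_SIZE);
	if (!eb)
		return 0;

	/* find_eb_for_page took a reference for us; drop it again */
	free_extent_buffer(eb);
	return 1;
}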
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state)
{
struct extent_io_tree *tree;
u64 found_start;
int found_level;
- unsigned long len;
struct extent_buffer *eb;
struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
int ret = 0;
+ int reads_done;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- if (page->private == EXTENT_PAGE_PRIVATE)
- goto out;
if (!page->private)
goto out;
- len = page->private >> 2;
- WARN_ON(len == 0);
+ tree = &BTRFS_I(page->mapping->host)->io_tree;
+ eb = (struct extent_buffer *)page->private;
- eb = alloc_extent_buffer(tree, start, len, page);
- if (eb == NULL) {
+ /* the pending IO might have been the only thing that kept this buffer
+	 * in memory. Make sure we have a ref for all these other checks
+ */
+ extent_buffer_get(eb);
+
+ reads_done = atomic_dec_and_test(&eb->io_pages);
+ if (!reads_done)
+ goto err;
+
+ if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
ret = -EIO;
- goto out;
+ goto err;
}
found_start = btrfs_header_bytenr(eb);
- if (found_start != start) {
+ if (found_start != eb->start) {
printk_ratelimited(KERN_INFO "btrfs bad tree block start "
"%llu %llu\n",
(unsigned long long)found_start,
@@ -572,13 +603,6 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
ret = -EIO;
goto err;
}
- if (eb->first_page != page) {
- printk(KERN_INFO "btrfs bad first page %lu %lu\n",
- eb->first_page->index, page->index);
- WARN_ON(1);
- ret = -EIO;
- goto err;
- }
if (check_tree_block_fsid(root, eb)) {
printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
(unsigned long long)eb->start);
@@ -606,48 +630,31 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
ret = -EIO;
}
- end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
- end = eb->start + end - 1;
+ if (!ret)
+ set_extent_buffer_uptodate(eb);
err:
if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
btree_readahead_hook(root, eb, eb->start, ret);
}
+ if (ret)
+ clear_extent_buffer_uptodate(eb);
free_extent_buffer(eb);
out:
return ret;
}
-static int btree_io_failed_hook(struct bio *failed_bio,
- struct page *page, u64 start, u64 end,
- int mirror_num, struct extent_state *state)
+static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
- struct extent_io_tree *tree;
- unsigned long len;
struct extent_buffer *eb;
struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- if (page->private == EXTENT_PAGE_PRIVATE)
- goto out;
- if (!page->private)
- goto out;
-
- len = page->private >> 2;
- WARN_ON(len == 0);
-
- eb = alloc_extent_buffer(tree, start, len, page);
- if (eb == NULL)
- goto out;
-
- if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
- clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+ eb = (struct extent_buffer *)page->private;
+ set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ eb->failed_mirror = failed_mirror;
+ if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
btree_readahead_hook(root, eb, eb->start, -EIO);
- }
- free_extent_buffer(eb);
-
-out:
return -EIO; /* we fixed nothing */
}
@@ -719,11 +726,14 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
static void run_one_async_start(struct btrfs_work *work)
{
struct async_submit_bio *async;
+ int ret;
async = container_of(work, struct async_submit_bio, work);
- async->submit_bio_start(async->inode, async->rw, async->bio,
- async->mirror_num, async->bio_flags,
- async->bio_offset);
+ ret = async->submit_bio_start(async->inode, async->rw, async->bio,
+ async->mirror_num, async->bio_flags,
+ async->bio_offset);
+ if (ret)
+ async->error = ret;
}
static void run_one_async_done(struct btrfs_work *work)
@@ -744,6 +754,12 @@ static void run_one_async_done(struct btrfs_work *work)
waitqueue_active(&fs_info->async_submit_wait))
wake_up(&fs_info->async_submit_wait);
+	/* If an error occurred we just want to clean up the bio and move on */
+ if (async->error) {
+ bio_endio(async->bio, async->error);
+ return;
+ }
+
async->submit_bio_done(async->inode, async->rw, async->bio,
async->mirror_num, async->bio_flags,
async->bio_offset);
@@ -785,6 +801,8 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
async->bio_flags = bio_flags;
async->bio_offset = bio_offset;
+ async->error = 0;
+
atomic_inc(&fs_info->nr_async_submits);
if (rw & REQ_SYNC)
@@ -806,15 +824,18 @@ static int btree_csum_one_bio(struct bio *bio)
struct bio_vec *bvec = bio->bi_io_vec;
int bio_index = 0;
struct btrfs_root *root;
+ int ret = 0;
WARN_ON(bio->bi_vcnt <= 0);
while (bio_index < bio->bi_vcnt) {
root = BTRFS_I(bvec->bv_page->mapping->host)->root;
- csum_dirty_buffer(root, bvec->bv_page);
+ ret = csum_dirty_buffer(root, bvec->bv_page);
+ if (ret)
+ break;
bio_index++;
bvec++;
}
- return 0;
+ return ret;
}
static int __btree_submit_bio_start(struct inode *inode, int rw,
@@ -826,8 +847,7 @@ static int __btree_submit_bio_start(struct inode *inode, int rw,
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
*/
- btree_csum_one_bio(bio);
- return 0;
+ return btree_csum_one_bio(bio);
}
static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
@@ -847,15 +867,16 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
{
int ret;
- ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
- bio, 1);
- BUG_ON(ret);
-
if (!(rw & REQ_WRITE)) {
+
/*
* called for a read, do the setup so that checksum validation
* can happen in the async kernel threads
*/
+ ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
+ bio, 1);
+ if (ret)
+ return ret;
return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
mirror_num, 0);
}
@@ -893,34 +914,6 @@ static int btree_migratepage(struct address_space *mapping,
}
#endif
-static int btree_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct extent_io_tree *tree;
- struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
- struct extent_buffer *eb;
- int was_dirty;
-
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- if (!(current->flags & PF_MEMALLOC)) {
- return extent_write_full_page(tree, page,
- btree_get_extent, wbc);
- }
-
- redirty_page_for_writepage(wbc, page);
- eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
- WARN_ON(!eb);
-
- was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
- if (!was_dirty) {
- spin_lock(&root->fs_info->delalloc_lock);
- root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
- spin_unlock(&root->fs_info->delalloc_lock);
- }
- free_extent_buffer(eb);
-
- unlock_page(page);
- return 0;
-}
static int btree_writepages(struct address_space *mapping,
struct writeback_control *wbc)
@@ -940,7 +933,7 @@ static int btree_writepages(struct address_space *mapping,
if (num_dirty < thresh)
return 0;
}
- return extent_writepages(tree, mapping, btree_get_extent, wbc);
+ return btree_write_cache_pages(mapping, wbc);
}
static int btree_readpage(struct file *file, struct page *page)
@@ -952,16 +945,8 @@ static int btree_readpage(struct file *file, struct page *page)
static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
- struct extent_io_tree *tree;
- struct extent_map_tree *map;
- int ret;
-
if (PageWriteback(page) || PageDirty(page))
return 0;
-
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- map = &BTRFS_I(page->mapping->host)->extent_tree;
-
/*
* We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
* slab allocation from alloc_extent_state down the callchain where
@@ -969,18 +954,7 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
*/
gfp_flags &= ~GFP_SLAB_BUG_MASK;
- ret = try_release_extent_state(map, tree, page, gfp_flags);
- if (!ret)
- return 0;
-
- ret = try_release_extent_buffer(tree, page);
- if (ret == 1) {
- ClearPagePrivate(page);
- set_page_private(page, 0);
- page_cache_release(page);
- }
-
- return ret;
+ return try_release_extent_buffer(page, gfp_flags);
}
static void btree_invalidatepage(struct page *page, unsigned long offset)
@@ -998,15 +972,28 @@ static void btree_invalidatepage(struct page *page, unsigned long offset)
}
}
+static int btree_set_page_dirty(struct page *page)
+{
+ struct extent_buffer *eb;
+
+ BUG_ON(!PagePrivate(page));
+ eb = (struct extent_buffer *)page->private;
+ BUG_ON(!eb);
+ BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+ BUG_ON(!atomic_read(&eb->refs));
+ btrfs_assert_tree_locked(eb);
+ return __set_page_dirty_nobuffers(page);
+}
+
static const struct address_space_operations btree_aops = {
.readpage = btree_readpage,
- .writepage = btree_writepage,
.writepages = btree_writepages,
.releasepage = btree_releasepage,
.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
.migratepage = btree_migratepage,
#endif
+ .set_page_dirty = btree_set_page_dirty,
};
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
@@ -1049,7 +1036,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
free_extent_buffer(buf);
return -EIO;
- } else if (extent_buffer_uptodate(io_tree, buf, NULL)) {
+ } else if (extent_buffer_uptodate(buf)) {
*eb = buf;
} else {
free_extent_buffer(buf);
@@ -1074,20 +1061,20 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
struct extent_buffer *eb;
eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
- bytenr, blocksize, NULL);
+ bytenr, blocksize);
return eb;
}
int btrfs_write_tree_block(struct extent_buffer *buf)
{
- return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
+ return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
buf->start + buf->len - 1);
}
int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
- return filemap_fdatawait_range(buf->first_page->mapping,
+ return filemap_fdatawait_range(buf->pages[0]->mapping,
buf->start, buf->start + buf->len - 1);
}
@@ -1102,17 +1089,13 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
return NULL;
ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
-
- if (ret == 0)
- set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
return buf;
}
-int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf)
+void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct extent_buffer *buf)
{
- struct inode *btree_inode = root->fs_info->btree_inode;
if (btrfs_header_generation(buf) ==
root->fs_info->running_transaction->transid) {
btrfs_assert_tree_locked(buf);
@@ -1121,23 +1104,27 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
spin_lock(&root->fs_info->delalloc_lock);
if (root->fs_info->dirty_metadata_bytes >= buf->len)
root->fs_info->dirty_metadata_bytes -= buf->len;
- else
- WARN_ON(1);
+ else {
+ spin_unlock(&root->fs_info->delalloc_lock);
+ btrfs_panic(root->fs_info, -EOVERFLOW,
+ "Can't clear %lu bytes from "
+ " dirty_mdatadata_bytes (%lu)",
+ buf->len,
+ root->fs_info->dirty_metadata_bytes);
+ }
spin_unlock(&root->fs_info->delalloc_lock);
}
/* ugh, clear_extent_buffer_dirty needs to lock the page */
btrfs_set_lock_blocking(buf);
- clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
- buf);
+ clear_extent_buffer_dirty(buf);
}
- return 0;
}
-static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
- u32 stripesize, struct btrfs_root *root,
- struct btrfs_fs_info *fs_info,
- u64 objectid)
+static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
+ u32 stripesize, struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
+ u64 objectid)
{
root->node = NULL;
root->commit_root = NULL;
@@ -1189,13 +1176,12 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->defrag_running = 0;
root->root_key.objectid = objectid;
root->anon_dev = 0;
- return 0;
}
-static int find_and_setup_root(struct btrfs_root *tree_root,
- struct btrfs_fs_info *fs_info,
- u64 objectid,
- struct btrfs_root *root)
+static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
+ struct btrfs_fs_info *fs_info,
+ u64 objectid,
+ struct btrfs_root *root)
{
int ret;
u32 blocksize;
@@ -1208,7 +1194,8 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
&root->root_item, &root->root_key);
if (ret > 0)
return -ENOENT;
- BUG_ON(ret);
+ else if (ret < 0)
+ return ret;
generation = btrfs_root_generation(&root->root_item);
blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
@@ -1377,7 +1364,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
blocksize, generation);
root->commit_root = btrfs_root_node(root);
- BUG_ON(!root->node);
+ BUG_ON(!root->node); /* -ENOMEM */
out:
if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
root->ref_cows = 1;
@@ -1513,41 +1500,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
return 0;
}
-static int bio_ready_for_csum(struct bio *bio)
-{
- u64 length = 0;
- u64 buf_len = 0;
- u64 start = 0;
- struct page *page;
- struct extent_io_tree *io_tree = NULL;
- struct bio_vec *bvec;
- int i;
- int ret;
-
- bio_for_each_segment(bvec, bio, i) {
- page = bvec->bv_page;
- if (page->private == EXTENT_PAGE_PRIVATE) {
- length += bvec->bv_len;
- continue;
- }
- if (!page->private) {
- length += bvec->bv_len;
- continue;
- }
- length = bvec->bv_len;
- buf_len = page->private >> 2;
- start = page_offset(page) + bvec->bv_offset;
- io_tree = &BTRFS_I(page->mapping->host)->io_tree;
- }
- /* are we fully contained in this bio? */
- if (buf_len <= length)
- return 1;
-
- ret = extent_range_uptodate(io_tree, start + length,
- start + buf_len - 1);
- return ret;
-}
-
/*
* called by the kthread helper functions to finally call the bio end_io
* functions. This is where read checksum verification actually happens
@@ -1563,17 +1515,6 @@ static void end_workqueue_fn(struct btrfs_work *work)
bio = end_io_wq->bio;
fs_info = end_io_wq->info;
- /* metadata bio reads are special because the whole tree block must
- * be checksummed at once. This makes sure the entire block is in
- * ram and up to date before trying to verify things. For
- * blocksize <= pagesize, it is basically a noop
- */
- if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
- !bio_ready_for_csum(bio)) {
- btrfs_queue_worker(&fs_info->endio_meta_workers,
- &end_io_wq->work);
- return;
- }
error = end_io_wq->error;
bio->bi_private = end_io_wq->private;
bio->bi_end_io = end_io_wq->end_io;
@@ -1614,9 +1555,10 @@ static int transaction_kthread(void *arg)
u64 transid;
unsigned long now;
unsigned long delay;
- int ret;
+ bool cannot_commit;
do {
+ cannot_commit = false;
delay = HZ * 30;
vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
mutex_lock(&root->fs_info->transaction_kthread_mutex);
@@ -1638,11 +1580,14 @@ static int transaction_kthread(void *arg)
transid = cur->transid;
spin_unlock(&root->fs_info->trans_lock);
+ /* If the file system is aborted, this will always fail. */
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ cannot_commit = true;
+ goto sleep;
+ }
if (transid == trans->transid) {
- ret = btrfs_commit_transaction(trans, root);
- BUG_ON(ret);
+ btrfs_commit_transaction(trans, root);
} else {
btrfs_end_transaction(trans, root);
}
@@ -1653,7 +1598,8 @@ sleep:
if (!try_to_freeze()) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
- !btrfs_transaction_blocked(root->fs_info))
+ (!btrfs_transaction_blocked(root->fs_info) ||
+ cannot_commit))
schedule_timeout(delay);
__set_current_state(TASK_RUNNING);
}
@@ -2042,6 +1988,7 @@ int open_ctree(struct super_block *sb,
RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
fs_info->btree_inode->i_mapping);
+ BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
@@ -2084,6 +2031,7 @@ int open_ctree(struct super_block *sb,
__setup_root(4096, 4096, 4096, 4096, tree_root,
fs_info, BTRFS_ROOT_TREE_OBJECTID);
+ invalidate_bdev(fs_devices->latest_bdev);
bh = btrfs_read_dev_super(fs_devices->latest_bdev);
if (!bh) {
err = -EINVAL;
@@ -2104,7 +2052,12 @@ int open_ctree(struct super_block *sb,
/* check FS state, whether FS is broken. */
fs_info->fs_state |= btrfs_super_flags(disk_super);
- btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+ ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+ if (ret) {
+ printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
+ err = ret;
+ goto fail_alloc;
+ }
/*
* run through our array of backup supers and setup
@@ -2135,10 +2088,55 @@ int open_ctree(struct super_block *sb,
goto fail_alloc;
}
+ if (btrfs_super_leafsize(disk_super) !=
+ btrfs_super_nodesize(disk_super)) {
+ printk(KERN_ERR "BTRFS: couldn't mount because metadata "
+ "blocksizes don't match. node %d leaf %d\n",
+ btrfs_super_nodesize(disk_super),
+ btrfs_super_leafsize(disk_super));
+ err = -EINVAL;
+ goto fail_alloc;
+ }
+ if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
+ printk(KERN_ERR "BTRFS: couldn't mount because metadata "
+ "blocksize (%d) was too large\n",
+ btrfs_super_leafsize(disk_super));
+ err = -EINVAL;
+ goto fail_alloc;
+ }
+
features = btrfs_super_incompat_flags(disk_super);
features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+
+ /*
+ * flag our filesystem as having big metadata blocks if
+ * they are bigger than the page size
+ */
+ if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
+ if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
+ printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
+ features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
+ }
+
+ nodesize = btrfs_super_nodesize(disk_super);
+ leafsize = btrfs_super_leafsize(disk_super);
+ sectorsize = btrfs_super_sectorsize(disk_super);
+ stripesize = btrfs_super_stripesize(disk_super);
+
+ /*
+ * mixed block groups end up with duplicate but slightly offset
+ * extent buffers for the same range. It leads to corruptions
+ */
+ if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
+ (sectorsize != leafsize)) {
+ printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
+ "are not allowed for mixed block groups on %s\n",
+ sb->s_id);
+ goto fail_alloc;
+ }
+
btrfs_set_super_incompat_flags(disk_super, features);
features = btrfs_super_compat_ro_flags(disk_super) &
@@ -2242,10 +2240,6 @@ int open_ctree(struct super_block *sb,
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
4 * 1024 * 1024 / PAGE_CACHE_SIZE);
- nodesize = btrfs_super_nodesize(disk_super);
- leafsize = btrfs_super_leafsize(disk_super);
- sectorsize = btrfs_super_sectorsize(disk_super);
- stripesize = btrfs_super_stripesize(disk_super);
tree_root->nodesize = nodesize;
tree_root->leafsize = leafsize;
tree_root->sectorsize = sectorsize;
@@ -2285,7 +2279,7 @@ int open_ctree(struct super_block *sb,
chunk_root->node = read_tree_block(chunk_root,
btrfs_super_chunk_root(disk_super),
blocksize, generation);
- BUG_ON(!chunk_root->node);
+ BUG_ON(!chunk_root->node); /* -ENOMEM */
if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
sb->s_id);
@@ -2425,21 +2419,31 @@ retry_root_backup:
log_tree_root->node = read_tree_block(tree_root, bytenr,
blocksize,
generation + 1);
+ /* returns with log_tree_root freed on success */
ret = btrfs_recover_log_trees(log_tree_root);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_error(tree_root->fs_info, ret,
+ "Failed to recover log tree");
+ free_extent_buffer(log_tree_root->node);
+ kfree(log_tree_root);
+ goto fail_trans_kthread;
+ }
if (sb->s_flags & MS_RDONLY) {
- ret = btrfs_commit_super(tree_root);
- BUG_ON(ret);
+ ret = btrfs_commit_super(tree_root);
+ if (ret)
+ goto fail_trans_kthread;
}
}
ret = btrfs_find_orphan_roots(tree_root);
- BUG_ON(ret);
+ if (ret)
+ goto fail_trans_kthread;
if (!(sb->s_flags & MS_RDONLY)) {
ret = btrfs_cleanup_fs_roots(fs_info);
- BUG_ON(ret);
+ if (ret) {
+ }
ret = btrfs_recover_relocation(tree_root);
if (ret < 0) {
@@ -2859,6 +2863,8 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
if (total_errors > max_errors) {
printk(KERN_ERR "btrfs: %d errors while writing supers\n",
total_errors);
+
+ /* This shouldn't happen. FUA is masked off if unsupported */
BUG();
}
@@ -2875,9 +2881,9 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
}
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
if (total_errors > max_errors) {
- printk(KERN_ERR "btrfs: %d errors while writing supers\n",
- total_errors);
- BUG();
+ btrfs_error(root->fs_info, -EIO,
+ "%d errors while writing supers", total_errors);
+ return -EIO;
}
return 0;
}
@@ -2891,7 +2897,20 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
return ret;
}
-int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
+/* Kill all outstanding I/O */
+void btrfs_abort_devices(struct btrfs_root *root)
+{
+ struct list_head *head;
+ struct btrfs_device *dev;
+ mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ head = &root->fs_info->fs_devices->devices;
+ list_for_each_entry_rcu(dev, head, dev_list) {
+ blk_abort_queue(dev->bdev->bd_disk->queue);
+ }
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+}
+
+void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
spin_lock(&fs_info->fs_roots_radix_lock);
radix_tree_delete(&fs_info->fs_roots_radix,
@@ -2904,7 +2923,6 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
__btrfs_remove_free_space_cache(root->free_ino_pinned);
__btrfs_remove_free_space_cache(root->free_ino_ctl);
free_fs_root(root);
- return 0;
}
static void free_fs_root(struct btrfs_root *root)
@@ -2921,7 +2939,7 @@ static void free_fs_root(struct btrfs_root *root)
kfree(root);
}
-static int del_fs_roots(struct btrfs_fs_info *fs_info)
+static void del_fs_roots(struct btrfs_fs_info *fs_info)
{
int ret;
struct btrfs_root *gang[8];
@@ -2950,7 +2968,6 @@ static int del_fs_roots(struct btrfs_fs_info *fs_info)
for (i = 0; i < ret; i++)
btrfs_free_fs_root(fs_info, gang[i]);
}
- return 0;
}
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
@@ -2999,14 +3016,21 @@ int btrfs_commit_super(struct btrfs_root *root)
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_commit_transaction(trans, root);
- BUG_ON(ret);
+ if (ret)
+ return ret;
/* run commit again to drop the original snapshot */
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans, root);
+ if (ret)
+ return ret;
ret = btrfs_write_and_wait_transaction(NULL, root);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_error(root->fs_info, ret,
+ "Failed to sync btree inode to disk.");
+ return ret;
+ }
ret = write_ctree_super(NULL, root, 0);
return ret;
@@ -3122,10 +3146,9 @@ int close_ctree(struct btrfs_root *root)
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
int ret;
- struct inode *btree_inode = buf->first_page->mapping->host;
+ struct inode *btree_inode = buf->pages[0]->mapping->host;
- ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
- NULL);
+ ret = extent_buffer_uptodate(buf);
if (!ret)
return ret;
@@ -3136,16 +3159,13 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
- struct inode *btree_inode = buf->first_page->mapping->host;
- return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
- buf);
+ return set_extent_buffer_uptodate(buf);
}
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
- struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+ struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
u64 transid = btrfs_header_generation(buf);
- struct inode *btree_inode = root->fs_info->btree_inode;
int was_dirty;
btrfs_assert_tree_locked(buf);
@@ -3157,8 +3177,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
(unsigned long long)root->fs_info->generation);
WARN_ON(1);
}
- was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
- buf);
+ was_dirty = set_extent_buffer_dirty(buf);
if (!was_dirty) {
spin_lock(&root->fs_info->delalloc_lock);
root->fs_info->dirty_metadata_bytes += buf->len;
@@ -3212,12 +3231,8 @@ void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
- struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
- int ret;
- ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
- if (ret == 0)
- set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
- return ret;
+ struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+ return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}
static int btree_lock_page_hook(struct page *page, void *data,
@@ -3225,17 +3240,21 @@ static int btree_lock_page_hook(struct page *page, void *data,
{
struct inode *inode = page->mapping->host;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_buffer *eb;
- unsigned long len;
- u64 bytenr = page_offset(page);
- if (page->private == EXTENT_PAGE_PRIVATE)
+ /*
+ * We culled this eb but the page is still hanging out on the mapping,
+ * carry on.
+ */
+ if (!PagePrivate(page))
goto out;
- len = page->private >> 2;
- eb = find_extent_buffer(io_tree, bytenr, len);
- if (!eb)
+ eb = (struct extent_buffer *)page->private;
+ if (!eb) {
+ WARN_ON(1);
+ goto out;
+ }
+ if (page != eb->pages[0])
goto out;
if (!btrfs_try_tree_write_lock(eb)) {
@@ -3254,7 +3273,6 @@ static int btree_lock_page_hook(struct page *page, void *data,
}
btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
out:
if (!trylock_page(page)) {
flush_fn(data);
@@ -3263,15 +3281,23 @@ out:
return 0;
}
-static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int read_only)
{
+ if (btrfs_super_csum_type(fs_info->super_copy) >= ARRAY_SIZE(btrfs_csum_sizes)) {
+ printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
+ return -EINVAL;
+ }
+
if (read_only)
- return;
+ return 0;
- if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+ if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
printk(KERN_WARNING "warning: mount fs with errors, "
"running btrfsck is recommended\n");
+ }
+
+ return 0;
}
int btrfs_error_commit_super(struct btrfs_root *root)
@@ -3293,7 +3319,7 @@ int btrfs_error_commit_super(struct btrfs_root *root)
return ret;
}
-static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
+static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
{
struct btrfs_inode *btrfs_inode;
struct list_head splice;
@@ -3315,11 +3341,9 @@ static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
spin_unlock(&root->fs_info->ordered_extent_lock);
mutex_unlock(&root->fs_info->ordered_operations_mutex);
-
- return 0;
}
-static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
+static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
struct list_head splice;
struct btrfs_ordered_extent *ordered;
@@ -3351,12 +3375,10 @@ static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
}
spin_unlock(&root->fs_info->ordered_extent_lock);
-
- return 0;
}
-static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
- struct btrfs_root *root)
+int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ struct btrfs_root *root)
{
struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -3365,6 +3387,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
delayed_refs = &trans->delayed_refs;
+again:
spin_lock(&delayed_refs->lock);
if (delayed_refs->num_entries == 0) {
spin_unlock(&delayed_refs->lock);
@@ -3386,6 +3409,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_delayed_ref_head *head;
head = btrfs_delayed_node_to_head(ref);
+ spin_unlock(&delayed_refs->lock);
mutex_lock(&head->mutex);
kfree(head->extent_op);
delayed_refs->num_heads--;
@@ -3393,8 +3417,9 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
delayed_refs->num_heads_ready--;
list_del_init(&head->cluster);
mutex_unlock(&head->mutex);
+ btrfs_put_delayed_ref(ref);
+ goto again;
}
-
spin_unlock(&delayed_refs->lock);
btrfs_put_delayed_ref(ref);
@@ -3407,7 +3432,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
return ret;
}
-static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
+static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
{
struct btrfs_pending_snapshot *snapshot;
struct list_head splice;
@@ -3425,11 +3450,9 @@ static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
kfree(snapshot);
}
-
- return 0;
}
-static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
+static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
struct btrfs_inode *btrfs_inode;
struct list_head splice;
@@ -3449,8 +3472,6 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
}
spin_unlock(&root->fs_info->delalloc_lock);
-
- return 0;
}
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
@@ -3541,13 +3562,43 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
return 0;
}
-static int btrfs_cleanup_transaction(struct btrfs_root *root)
+void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ struct btrfs_root *root)
+{
+ btrfs_destroy_delayed_refs(cur_trans, root);
+ btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
+ cur_trans->dirty_pages.dirty_bytes);
+
+ /* FIXME: cleanup wait for commit */
+ cur_trans->in_commit = 1;
+ cur_trans->blocked = 1;
+ if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
+ wake_up(&root->fs_info->transaction_blocked_wait);
+
+ cur_trans->blocked = 0;
+ if (waitqueue_active(&root->fs_info->transaction_wait))
+ wake_up(&root->fs_info->transaction_wait);
+
+ cur_trans->commit_done = 1;
+ if (waitqueue_active(&cur_trans->commit_wait))
+ wake_up(&cur_trans->commit_wait);
+
+ btrfs_destroy_pending_snapshots(cur_trans);
+
+ btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
+ EXTENT_DIRTY);
+
+ /*
+ memset(cur_trans, 0, sizeof(*cur_trans));
+ kmem_cache_free(btrfs_transaction_cachep, cur_trans);
+ */
+}
+
+int btrfs_cleanup_transaction(struct btrfs_root *root)
{
struct btrfs_transaction *t;
LIST_HEAD(list);
- WARN_ON(1);
-
mutex_lock(&root->fs_info->transaction_kthread_mutex);
spin_lock(&root->fs_info->trans_lock);
@@ -3612,6 +3663,17 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
return 0;
}
+static int btree_writepage_io_failed_hook(struct bio *bio, struct page *page,
+ u64 start, u64 end,
+ struct extent_state *state)
+{
+ struct super_block *sb = page->mapping->host->i_sb;
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ btrfs_error(fs_info, -EIO,
+ "Error occured while writing out btree at %llu", start);
+ return -EIO;
+}
+
static struct extent_io_ops btree_extent_io_ops = {
.write_cache_pages_lock_hook = btree_lock_page_hook,
.readpage_end_io_hook = btree_readpage_end_io_hook,
@@ -3619,4 +3681,5 @@ static struct extent_io_ops btree_extent_io_ops = {
.submit_bio_hook = btree_submit_bio_hook,
/* note we're sharing with inode.c for the merge bio hook */
.merge_bio_hook = btrfs_merge_bio_hook,
+ .writepage_io_failed_hook = btree_writepage_io_failed_hook,
};
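
The disk-io.c changes above follow one recurring conversion: call sites that used to BUG_ON(ret) now propagate the error and, where a transaction is in flight, mark it aborted. A minimal sketch of that pattern (illustrative only, not part of the patch; do_step() is a hypothetical stand-in for any helper that can return an errno):

    static int example_caller(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root)
    {
            int ret;

            ret = do_step(trans, root);     /* used to be: BUG_ON(ret) */
            if (ret) {
                    /* poison the transaction so later commits bail out */
                    btrfs_abort_transaction(trans, root, ret);
                    return ret;
            }
            return 0;
    }

Callers further up then only check the return value; once the transaction is aborted, paths such as btrfs_run_delayed_refs() return early via the trans->aborted checks added in extent-tree.c below.
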
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index e4bc4741319b..a7ace1a2dd12 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -44,8 +44,8 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
int mirror_num, struct extent_buffer **eb);
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize);
-int clean_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *buf);
+void clean_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct extent_buffer *buf);
int open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options);
@@ -64,7 +64,7 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
-int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
+void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
@@ -85,6 +85,10 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
+int btrfs_cleanup_transaction(struct btrfs_root *root);
+void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
+ struct btrfs_root *root);
+void btrfs_abort_devices(struct btrfs_root *root);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_init_lockdep(void);
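
Several prototypes in disk-io.h change from int to void because the functions can no longer report failure to the caller: clean_tree_block() now panics internally on a dirty_metadata_bytes underflow, and btrfs_free_fs_root() only ever returned 0. A hedged before/after sketch of a hypothetical call site (illustrative, not taken from the patch):

    /* before: return value checked even though it was always 0 */
    ret = clean_tree_block(trans, root, buf);
    BUG_ON(ret);

    /* after: no return value to check */
    clean_tree_block(trans, root, buf);
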
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 5f77166fd01c..e887ee62b6d4 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -193,7 +193,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
if (ret < 0)
goto fail;
- BUG_ON(ret == 0);
+ BUG_ON(ret == 0); /* Key with offset of -1 found */
if (path->slots[0] == 0) {
ret = -ENOENT;
goto fail;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 37e0a800d34e..a84420491c11 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -245,7 +245,7 @@ static int exclude_super_stripes(struct btrfs_root *root,
cache->bytes_super += stripe_len;
ret = add_excluded_extent(root, cache->key.objectid,
stripe_len);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
@@ -253,13 +253,13 @@ static int exclude_super_stripes(struct btrfs_root *root,
ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
cache->key.objectid, bytenr,
0, &logical, &nr, &stripe_len);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
while (nr--) {
cache->bytes_super += stripe_len;
ret = add_excluded_extent(root, logical[nr],
stripe_len);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
kfree(logical);
@@ -321,7 +321,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
total_added += size;
ret = btrfs_add_free_space(block_group, start,
size);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM or logic error */
start = extent_end + 1;
} else {
break;
@@ -332,7 +332,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
size = end - start;
total_added += size;
ret = btrfs_add_free_space(block_group, start, size);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM or logic error */
}
return total_added;
@@ -474,7 +474,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
int ret = 0;
caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
- BUG_ON(!caching_ctl);
+ if (!caching_ctl)
+ return -ENOMEM;
INIT_LIST_HEAD(&caching_ctl->list);
mutex_init(&caching_ctl->mutex);
@@ -982,7 +983,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
ret = btrfs_next_leaf(root, path);
if (ret < 0)
return ret;
- BUG_ON(ret > 0);
+ BUG_ON(ret > 0); /* Corruption */
leaf = path->nodes[0];
}
btrfs_item_key_to_cpu(leaf, &found_key,
@@ -1008,9 +1009,9 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
new_size + extra_size, 1);
if (ret < 0)
return ret;
- BUG_ON(ret);
+ BUG_ON(ret); /* Corruption */
- ret = btrfs_extend_item(trans, root, path, new_size);
+ btrfs_extend_item(trans, root, path, new_size);
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1478,7 +1479,11 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
err = ret;
goto out;
}
- BUG_ON(ret);
+ if (ret && !insert) {
+ err = -ENOENT;
+ goto out;
+ }
+ BUG_ON(ret); /* Corruption */
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
@@ -1592,13 +1597,13 @@ out:
* helper to add new inline back ref
*/
static noinline_for_stack
-int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_extent_inline_ref *iref,
- u64 parent, u64 root_objectid,
- u64 owner, u64 offset, int refs_to_add,
- struct btrfs_delayed_extent_op *extent_op)
+void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_extent_inline_ref *iref,
+ u64 parent, u64 root_objectid,
+ u64 owner, u64 offset, int refs_to_add,
+ struct btrfs_delayed_extent_op *extent_op)
{
struct extent_buffer *leaf;
struct btrfs_extent_item *ei;
@@ -1608,7 +1613,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
u64 refs;
int size;
int type;
- int ret;
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1617,7 +1621,7 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
type = extent_ref_type(parent, owner);
size = btrfs_extent_inline_ref_size(type);
- ret = btrfs_extend_item(trans, root, path, size);
+ btrfs_extend_item(trans, root, path, size);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
@@ -1652,7 +1656,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
}
btrfs_mark_buffer_dirty(leaf);
- return 0;
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
@@ -1687,12 +1690,12 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
* helper to update/remove inline back ref
*/
static noinline_for_stack
-int update_inline_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_extent_inline_ref *iref,
- int refs_to_mod,
- struct btrfs_delayed_extent_op *extent_op)
+void update_inline_extent_backref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_extent_inline_ref *iref,
+ int refs_to_mod,
+ struct btrfs_delayed_extent_op *extent_op)
{
struct extent_buffer *leaf;
struct btrfs_extent_item *ei;
@@ -1703,7 +1706,6 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans,
u32 item_size;
int size;
int type;
- int ret;
u64 refs;
leaf = path->nodes[0];
@@ -1745,10 +1747,9 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans,
memmove_extent_buffer(leaf, ptr, ptr + size,
end - ptr - size);
item_size -= size;
- ret = btrfs_truncate_item(trans, root, path, item_size, 1);
+ btrfs_truncate_item(trans, root, path, item_size, 1);
}
btrfs_mark_buffer_dirty(leaf);
- return 0;
}
static noinline_for_stack
@@ -1768,13 +1769,13 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
root_objectid, owner, offset, 1);
if (ret == 0) {
BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
- ret = update_inline_extent_backref(trans, root, path, iref,
- refs_to_add, extent_op);
+ update_inline_extent_backref(trans, root, path, iref,
+ refs_to_add, extent_op);
} else if (ret == -ENOENT) {
- ret = setup_inline_extent_backref(trans, root, path, iref,
- parent, root_objectid,
- owner, offset, refs_to_add,
- extent_op);
+ setup_inline_extent_backref(trans, root, path, iref, parent,
+ root_objectid, owner, offset,
+ refs_to_add, extent_op);
+ ret = 0;
}
return ret;
}
@@ -1804,12 +1805,12 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_extent_inline_ref *iref,
int refs_to_drop, int is_data)
{
- int ret;
+ int ret = 0;
BUG_ON(!is_data && refs_to_drop != 1);
if (iref) {
- ret = update_inline_extent_backref(trans, root, path, iref,
- -refs_to_drop, NULL);
+ update_inline_extent_backref(trans, root, path, iref,
+ -refs_to_drop, NULL);
} else if (is_data) {
ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
} else {
@@ -1835,6 +1836,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
/* Tell the block device(s) that the sectors can be discarded */
ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
bytenr, &num_bytes, &bbio, 0);
+ /* Error condition is -ENOMEM */
if (!ret) {
struct btrfs_bio_stripe *stripe = bbio->stripes;
int i;
@@ -1850,7 +1852,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
if (!ret)
discarded_bytes += stripe->length;
else if (ret != -EOPNOTSUPP)
- break;
+ break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
/*
* Just in case we get back EOPNOTSUPP for some reason,
@@ -1869,6 +1871,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
return ret;
}
+/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent,
@@ -1944,7 +1947,8 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
ret = insert_extent_backref(trans, root->fs_info->extent_root,
path, bytenr, parent, root_objectid,
owner, offset, refs_to_add);
- BUG_ON(ret);
+ if (ret)
+ btrfs_abort_transaction(trans, root, ret);
out:
btrfs_free_path(path);
return err;
@@ -2031,6 +2035,9 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
int ret;
int err = 0;
+ if (trans->aborted)
+ return 0;
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -2128,7 +2135,11 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
{
- int ret;
+ int ret = 0;
+
+ if (trans->aborted)
+ return 0;
+
if (btrfs_delayed_ref_is_head(node)) {
struct btrfs_delayed_ref_head *head;
/*
@@ -2146,11 +2157,10 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
ret = btrfs_del_csums(trans, root,
node->bytenr,
node->num_bytes);
- BUG_ON(ret);
}
}
mutex_unlock(&head->mutex);
- return 0;
+ return ret;
}
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
@@ -2197,6 +2207,10 @@ again:
return NULL;
}
+/*
+ * Returns 0 on success or if called with an already aborted transaction.
+ * Returns -ENOMEM or -EIO on failure and will abort the transaction.
+ */
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct list_head *cluster)
@@ -2285,9 +2299,13 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ret = run_delayed_extent_op(trans, root,
ref, extent_op);
- BUG_ON(ret);
kfree(extent_op);
+ if (ret) {
+ printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
+ return ret;
+ }
+
goto next;
}
@@ -2308,11 +2326,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ret = run_one_delayed_ref(trans, root, ref, extent_op,
must_insert_reserved);
- BUG_ON(ret);
btrfs_put_delayed_ref(ref);
kfree(extent_op);
count++;
+
+ if (ret) {
+ printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
+ return ret;
+ }
+
next:
do_chunk_alloc(trans, root->fs_info->extent_root,
2 * 1024 * 1024,
@@ -2347,6 +2370,9 @@ static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
* 0, which means to process everything in the tree at the start
* of the run (but not newly added entries), or it can be some target
* number you'd like to process.
+ *
+ * Returns 0 on success or if called with an aborted transaction
+ * Returns <0 on error and aborts the transaction
*/
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root, unsigned long count)
@@ -2362,6 +2388,10 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
unsigned long num_refs = 0;
int consider_waiting;
+ /* We'll clean this up in btrfs_cleanup_transaction */
+ if (trans->aborted)
+ return 0;
+
if (root == root->fs_info->extent_root)
root = root->fs_info->tree_root;
@@ -2419,7 +2449,11 @@ again:
}
ret = run_clustered_refs(trans, root, &cluster);
- BUG_ON(ret < 0);
+ if (ret < 0) {
+ spin_unlock(&delayed_refs->lock);
+ btrfs_abort_transaction(trans, root, ret);
+ return ret;
+ }
count -= min_t(unsigned long, ret, count);
@@ -2584,7 +2618,7 @@ static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
goto out;
- BUG_ON(ret == 0);
+ BUG_ON(ret == 0); /* Corruption */
ret = -ENOENT;
if (path->slots[0] == 0)
@@ -2738,7 +2772,6 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
}
return 0;
fail:
- BUG();
return ret;
}
@@ -2767,7 +2800,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
if (ret < 0)
goto fail;
- BUG_ON(ret);
+ BUG_ON(ret); /* Corruption */
leaf = path->nodes[0];
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -2775,8 +2808,10 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
fail:
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
return ret;
+ }
return 0;
}
@@ -2949,7 +2984,8 @@ again:
if (last == 0) {
err = btrfs_run_delayed_refs(trans, root,
(unsigned long)-1);
- BUG_ON(err);
+ if (err) /* File system offline */
+ goto out;
}
cache = btrfs_lookup_first_block_group(root->fs_info, last);
@@ -2976,7 +3012,9 @@ again:
last = cache->key.objectid + cache->key.offset;
err = write_one_cache_group(trans, root, path, cache);
- BUG_ON(err);
+ if (err) /* File system offline */
+ goto out;
+
btrfs_put_block_group(cache);
}
@@ -2989,7 +3027,8 @@ again:
if (last == 0) {
err = btrfs_run_delayed_refs(trans, root,
(unsigned long)-1);
- BUG_ON(err);
+ if (err) /* File system offline */
+ goto out;
}
cache = btrfs_lookup_first_block_group(root->fs_info, last);
@@ -3014,20 +3053,21 @@ again:
continue;
}
- btrfs_write_out_cache(root, trans, cache, path);
+ err = btrfs_write_out_cache(root, trans, cache, path);
/*
* If we didn't have an error then the cache state is still
* NEED_WRITE, so we can set it to WRITTEN.
*/
- if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
+ if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
cache->disk_cache_state = BTRFS_DC_WRITTEN;
last = cache->key.objectid + cache->key.offset;
btrfs_put_block_group(cache);
}
+out:
btrfs_free_path(path);
- return 0;
+ return err;
}
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -3098,11 +3138,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
- u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
- /* chunk -> extended profile */
- if (extra_flags == 0)
- extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+ u64 extra_flags = chunk_to_extended(flags) &
+ BTRFS_EXTENDED_PROFILE_MASK;
if (flags & BTRFS_BLOCK_GROUP_DATA)
fs_info->avail_data_alloc_bits |= extra_flags;
@@ -3113,6 +3150,35 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
}
/*
+ * returns target flags in extended format or 0 if restripe for this
+ * chunk_type is not in progress
+ */
+static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
+{
+ struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+ u64 target = 0;
+
+ BUG_ON(!mutex_is_locked(&fs_info->volume_mutex) &&
+ !spin_is_locked(&fs_info->balance_lock));
+
+ if (!bctl)
+ return 0;
+
+ if (flags & BTRFS_BLOCK_GROUP_DATA &&
+ bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+ target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
+ } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
+ bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+ target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
+ } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
+ bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+ target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
+ }
+
+ return target;
+}
+
+/*
* @flags: available profiles in extended format (see ctree.h)
*
* Returns reduced profile in chunk format. If profile changing is in
@@ -3128,31 +3194,19 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
*/
u64 num_devices = root->fs_info->fs_devices->rw_devices +
root->fs_info->fs_devices->missing_devices;
+ u64 target;
- /* pick restriper's target profile if it's available */
+ /*
+ * see if restripe for this chunk_type is in progress, if so
+ * try to reduce to the target profile
+ */
spin_lock(&root->fs_info->balance_lock);
- if (root->fs_info->balance_ctl) {
- struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
- u64 tgt = 0;
-
- if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
- (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
- (flags & bctl->data.target)) {
- tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
- } else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
- (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
- (flags & bctl->sys.target)) {
- tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
- } else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
- (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
- (flags & bctl->meta.target)) {
- tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
- }
-
- if (tgt) {
+ target = get_restripe_target(root->fs_info, flags);
+ if (target) {
+ /* pick target profile only if it's already available */
+ if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
spin_unlock(&root->fs_info->balance_lock);
- flags = tgt;
- goto out;
+ return extended_to_chunk(target);
}
}
spin_unlock(&root->fs_info->balance_lock);
@@ -3180,10 +3234,7 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
flags &= ~BTRFS_BLOCK_GROUP_RAID0;
}
-out:
- /* extended -> chunk profile */
- flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
- return flags;
+ return extended_to_chunk(flags);
}
static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
@@ -3312,8 +3363,7 @@ commit_trans:
}
data_sinfo->bytes_may_use += bytes;
trace_btrfs_space_reservation(root->fs_info, "space_info",
- (u64)(unsigned long)data_sinfo,
- bytes, 1);
+ data_sinfo->flags, bytes, 1);
spin_unlock(&data_sinfo->lock);
return 0;
@@ -3334,8 +3384,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
spin_lock(&data_sinfo->lock);
data_sinfo->bytes_may_use -= bytes;
trace_btrfs_space_reservation(root->fs_info, "space_info",
- (u64)(unsigned long)data_sinfo,
- bytes, 0);
+ data_sinfo->flags, bytes, 0);
spin_unlock(&data_sinfo->lock);
}
@@ -3396,6 +3445,50 @@ static int should_alloc_chunk(struct btrfs_root *root,
return 1;
}
+static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
+{
+ u64 num_dev;
+
+ if (type & BTRFS_BLOCK_GROUP_RAID10 ||
+ type & BTRFS_BLOCK_GROUP_RAID0)
+ num_dev = root->fs_info->fs_devices->rw_devices;
+ else if (type & BTRFS_BLOCK_GROUP_RAID1)
+ num_dev = 2;
+ else
+ num_dev = 1; /* DUP or single */
+
+ /* metadata for updating devices and chunk tree */
+ return btrfs_calc_trans_metadata_size(root, num_dev + 1);
+}
+
+static void check_system_chunk(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 type)
+{
+ struct btrfs_space_info *info;
+ u64 left;
+ u64 thresh;
+
+ info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
+ spin_lock(&info->lock);
+ left = info->total_bytes - info->bytes_used - info->bytes_pinned -
+ info->bytes_reserved - info->bytes_readonly;
+ spin_unlock(&info->lock);
+
+ thresh = get_system_chunk_thresh(root, type);
+ if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
+ left, thresh, type);
+ dump_space_info(info, 0, 0);
+ }
+
+ if (left < thresh) {
+ u64 flags;
+
+ flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
+ btrfs_alloc_chunk(trans, root, flags);
+ }
+}
+
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 alloc_bytes,
u64 flags, int force)
@@ -3405,15 +3498,13 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
int wait_for_alloc = 0;
int ret = 0;
- BUG_ON(!profile_is_valid(flags, 0));
-
space_info = __find_space_info(extent_root->fs_info, flags);
if (!space_info) {
ret = update_space_info(extent_root->fs_info, flags,
0, 0, &space_info);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
- BUG_ON(!space_info);
+ BUG_ON(!space_info); /* Logic error */
again:
spin_lock(&space_info->lock);
@@ -3468,6 +3559,12 @@ again:
force_metadata_allocation(fs_info);
}
+ /*
+ * Check if we have enough space in SYSTEM chunk because we may need
+ * to update devices.
+ */
+ check_system_chunk(trans, extent_root, flags);
+
ret = btrfs_alloc_chunk(trans, extent_root, flags);
if (ret < 0 && ret != -ENOSPC)
goto out;
@@ -3678,8 +3775,10 @@ again:
ret = wait_event_interruptible(space_info->wait,
!space_info->flush);
/* Must have been interrupted, return */
- if (ret)
+ if (ret) {
+ printk(KERN_DEBUG "btrfs: %s returning -EINTR\n", __func__);
return -EINTR;
+ }
spin_lock(&space_info->lock);
}
@@ -3700,9 +3799,7 @@ again:
if (used + orig_bytes <= space_info->total_bytes) {
space_info->bytes_may_use += orig_bytes;
trace_btrfs_space_reservation(root->fs_info,
- "space_info",
- (u64)(unsigned long)space_info,
- orig_bytes, 1);
+ "space_info", space_info->flags, orig_bytes, 1);
ret = 0;
} else {
/*
@@ -3771,9 +3868,7 @@ again:
if (used + num_bytes < space_info->total_bytes + avail) {
space_info->bytes_may_use += orig_bytes;
trace_btrfs_space_reservation(root->fs_info,
- "space_info",
- (u64)(unsigned long)space_info,
- orig_bytes, 1);
+ "space_info", space_info->flags, orig_bytes, 1);
ret = 0;
} else {
wait_ordered = true;
@@ -3836,8 +3931,9 @@ out:
return ret;
}
-static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+static struct btrfs_block_rsv *get_block_rsv(
+ const struct btrfs_trans_handle *trans,
+ const struct btrfs_root *root)
{
struct btrfs_block_rsv *block_rsv = NULL;
@@ -3918,8 +4014,7 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
spin_lock(&space_info->lock);
space_info->bytes_may_use -= num_bytes;
trace_btrfs_space_reservation(fs_info, "space_info",
- (u64)(unsigned long)space_info,
- num_bytes, 0);
+ space_info->flags, num_bytes, 0);
space_info->reservation_progress++;
spin_unlock(&space_info->lock);
}
@@ -4137,14 +4232,14 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
block_rsv->reserved += num_bytes;
sinfo->bytes_may_use += num_bytes;
trace_btrfs_space_reservation(fs_info, "space_info",
- (u64)(unsigned long)sinfo, num_bytes, 1);
+ sinfo->flags, num_bytes, 1);
}
if (block_rsv->reserved >= block_rsv->size) {
num_bytes = block_rsv->reserved - block_rsv->size;
sinfo->bytes_may_use -= num_bytes;
trace_btrfs_space_reservation(fs_info, "space_info",
- (u64)(unsigned long)sinfo, num_bytes, 0);
+ sinfo->flags, num_bytes, 0);
sinfo->reservation_progress++;
block_rsv->reserved = block_rsv->size;
block_rsv->full = 1;
@@ -4198,12 +4293,12 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
return;
trace_btrfs_space_reservation(root->fs_info, "transaction",
- (u64)(unsigned long)trans,
- trans->bytes_reserved, 0);
+ trans->transid, trans->bytes_reserved, 0);
btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
trans->bytes_reserved = 0;
}
+/* Can only return 0 or -ENOSPC */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
struct inode *inode)
{
@@ -4540,7 +4635,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
while (total) {
cache = btrfs_lookup_block_group(info, bytenr);
if (!cache)
- return -1;
+ return -ENOENT;
if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10))
@@ -4643,7 +4738,7 @@ int btrfs_pin_extent(struct btrfs_root *root,
struct btrfs_block_group_cache *cache;
cache = btrfs_lookup_block_group(root->fs_info, bytenr);
- BUG_ON(!cache);
+ BUG_ON(!cache); /* Logic error */
pin_down_extent(root, cache, bytenr, num_bytes, reserved);
@@ -4661,7 +4756,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *cache;
cache = btrfs_lookup_block_group(root->fs_info, bytenr);
- BUG_ON(!cache);
+ BUG_ON(!cache); /* Logic error */
/*
* pull in the free space cache (if any) so that our pin
@@ -4706,6 +4801,7 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
{
struct btrfs_space_info *space_info = cache->space_info;
int ret = 0;
+
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
if (reserve != RESERVE_FREE) {
@@ -4716,9 +4812,8 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
space_info->bytes_reserved += num_bytes;
if (reserve == RESERVE_ALLOC) {
trace_btrfs_space_reservation(cache->fs_info,
- "space_info",
- (u64)(unsigned long)space_info,
- num_bytes, 0);
+ "space_info", space_info->flags,
+ num_bytes, 0);
space_info->bytes_may_use -= num_bytes;
}
}
@@ -4734,7 +4829,7 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
return ret;
}
-int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
+void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4764,7 +4859,6 @@ int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
up_write(&fs_info->extent_commit_sem);
update_global_block_rsv(fs_info);
- return 0;
}
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
@@ -4779,7 +4873,7 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
if (cache)
btrfs_put_block_group(cache);
cache = btrfs_lookup_block_group(fs_info, start);
- BUG_ON(!cache);
+ BUG_ON(!cache); /* Logic error */
}
len = cache->key.objectid + cache->key.offset - start;
@@ -4816,6 +4910,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
u64 end;
int ret;
+ if (trans->aborted)
+ return 0;
+
if (fs_info->pinned_extents == &fs_info->freed_extents[0])
unpin = &fs_info->freed_extents[1];
else
@@ -4901,7 +4998,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = remove_extent_backref(trans, extent_root, path,
NULL, refs_to_drop,
is_data);
- BUG_ON(ret);
+ if (ret)
+ goto abort;
btrfs_release_path(path);
path->leave_spinning = 1;
@@ -4919,10 +5017,11 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
btrfs_print_leaf(extent_root,
path->nodes[0]);
}
- BUG_ON(ret);
+ if (ret < 0)
+ goto abort;
extent_slot = path->slots[0];
}
- } else {
+ } else if (ret == -ENOENT) {
btrfs_print_leaf(extent_root, path->nodes[0]);
WARN_ON(1);
printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
@@ -4932,6 +5031,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
(unsigned long long)root_objectid,
(unsigned long long)owner_objectid,
(unsigned long long)owner_offset);
+ } else {
+ goto abort;
}
leaf = path->nodes[0];
@@ -4941,7 +5042,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
BUG_ON(found_extent || extent_slot != path->slots[0]);
ret = convert_extent_item_v0(trans, extent_root, path,
owner_objectid, 0);
- BUG_ON(ret < 0);
+ if (ret < 0)
+ goto abort;
btrfs_release_path(path);
path->leave_spinning = 1;
@@ -4958,7 +5060,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
(unsigned long long)bytenr);
btrfs_print_leaf(extent_root, path->nodes[0]);
}
- BUG_ON(ret);
+ if (ret < 0)
+ goto abort;
extent_slot = path->slots[0];
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, extent_slot);
@@ -4995,7 +5098,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = remove_extent_backref(trans, extent_root, path,
iref, refs_to_drop,
is_data);
- BUG_ON(ret);
+ if (ret)
+ goto abort;
}
} else {
if (found_extent) {
@@ -5012,23 +5116,27 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
num_to_del);
- BUG_ON(ret);
+ if (ret)
+ goto abort;
btrfs_release_path(path);
if (is_data) {
ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
- BUG_ON(ret);
- } else {
- invalidate_mapping_pages(info->btree_inode->i_mapping,
- bytenr >> PAGE_CACHE_SHIFT,
- (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
+ if (ret)
+ goto abort;
}
ret = update_block_group(trans, root, bytenr, num_bytes, 0);
- BUG_ON(ret);
+ if (ret)
+ goto abort;
}
+out:
btrfs_free_path(path);
return ret;
+
+abort:
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto out;
}
/*
@@ -5124,7 +5232,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
parent, root->root_key.objectid,
btrfs_header_level(buf),
BTRFS_DROP_DELAYED_REF, NULL, for_cow);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
if (!last_ref)
@@ -5158,6 +5266,7 @@ out:
btrfs_put_block_group(cache);
}
+/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
u64 owner, u64 offset, int for_cow)
@@ -5179,14 +5288,12 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
num_bytes,
parent, root_objectid, (int)owner,
BTRFS_DROP_DELAYED_REF, NULL, for_cow);
- BUG_ON(ret);
} else {
ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
num_bytes,
parent, root_objectid, owner,
offset, BTRFS_DROP_DELAYED_REF,
NULL, for_cow);
- BUG_ON(ret);
}
return ret;
}
@@ -5243,28 +5350,34 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
return 0;
}
-static int get_block_group_index(struct btrfs_block_group_cache *cache)
+static int __get_block_group_index(u64 flags)
{
int index;
- if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
+
+ if (flags & BTRFS_BLOCK_GROUP_RAID10)
index = 0;
- else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
+ else if (flags & BTRFS_BLOCK_GROUP_RAID1)
index = 1;
- else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
+ else if (flags & BTRFS_BLOCK_GROUP_DUP)
index = 2;
- else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
+ else if (flags & BTRFS_BLOCK_GROUP_RAID0)
index = 3;
else
index = 4;
+
return index;
}
+static int get_block_group_index(struct btrfs_block_group_cache *cache)
+{
+ return __get_block_group_index(cache->flags);
+}
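
The refactor above splits the block-group-to-index mapping into a flags-only helper plus a thin wrapper, so later callers that only have raw profile flags (for example a restripe target) can reuse the same table. A rough sketch of the same shape, with made-up flag values:

#include <stdint.h>
#include <stdio.h>

#define SK_RAID10 (1u << 0)
#define SK_RAID1  (1u << 1)
#define SK_DUP    (1u << 2)
#define SK_RAID0  (1u << 3)

struct sk_group { uint64_t flags; };

/* Core helper works on plain flags... */
static int sk_index_from_flags(uint64_t flags)
{
	if (flags & SK_RAID10) return 0;
	if (flags & SK_RAID1)  return 1;
	if (flags & SK_DUP)    return 2;
	if (flags & SK_RAID0)  return 3;
	return 4;			/* single */
}

/* ...and the old interface becomes a one-line wrapper. */
static int sk_index_from_group(const struct sk_group *g)
{
	return sk_index_from_flags(g->flags);
}

int main(void)
{
	struct sk_group g = { SK_DUP };

	printf("%d %d\n", sk_index_from_group(&g), sk_index_from_flags(SK_RAID0));
	return 0;
}
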
+
enum btrfs_loop_type {
- LOOP_FIND_IDEAL = 0,
- LOOP_CACHING_NOWAIT = 1,
- LOOP_CACHING_WAIT = 2,
- LOOP_ALLOC_CHUNK = 3,
- LOOP_NO_EMPTY_SIZE = 4,
+ LOOP_CACHING_NOWAIT = 0,
+ LOOP_CACHING_WAIT = 1,
+ LOOP_ALLOC_CHUNK = 2,
+ LOOP_NO_EMPTY_SIZE = 3,
};
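
With LOOP_FIND_IDEAL gone, find_free_extent() simply walks the remaining stages in order, escalating only when the cheaper stage found nothing. A self-contained sketch of such a staged retry loop; the stage names and sk_try_search() below are invented:

#include <stdio.h>
#include <stdbool.h>

enum sk_stage {				/* mirrors the trimmed-down loop stages */
	SK_CACHING_NOWAIT = 0,
	SK_CACHING_WAIT,
	SK_ALLOC_CHUNK,
	SK_NO_EMPTY_SIZE,
};

/* Hypothetical search step: succeeds only once enough effort is spent. */
static bool sk_try_search(enum sk_stage stage)
{
	return stage >= SK_ALLOC_CHUNK;
}

static int sk_find_free(void)
{
	enum sk_stage stage = SK_CACHING_NOWAIT;

	for (;;) {
		if (sk_try_search(stage))
			return 0;		/* found space */
		if (stage == SK_NO_EMPTY_SIZE)
			return -1;		/* genuinely out of space */
		stage++;			/* escalate and search again */
	}
}

int main(void)
{
	printf("result: %d\n", sk_find_free());
	return 0;
}
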
/*
@@ -5278,7 +5391,6 @@ enum btrfs_loop_type {
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *orig_root,
u64 num_bytes, u64 empty_size,
- u64 search_start, u64 search_end,
u64 hint_byte, struct btrfs_key *ins,
u64 data)
{
@@ -5287,6 +5399,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_free_cluster *last_ptr = NULL;
struct btrfs_block_group_cache *block_group = NULL;
struct btrfs_block_group_cache *used_block_group;
+ u64 search_start = 0;
int empty_cluster = 2 * 1024 * 1024;
int allowed_chunk_alloc = 0;
int done_chunk_alloc = 0;
@@ -5300,8 +5413,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
bool failed_alloc = false;
bool use_cluster = true;
bool have_caching_bg = false;
- u64 ideal_cache_percent = 0;
- u64 ideal_cache_offset = 0;
WARN_ON(num_bytes < root->sectorsize);
btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -5351,7 +5462,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
empty_cluster = 0;
if (search_start == hint_byte) {
-ideal_cache:
block_group = btrfs_lookup_block_group(root->fs_info,
search_start);
used_block_group = block_group;
@@ -5363,8 +5473,7 @@ ideal_cache:
* picked out then we don't care that the block group is cached.
*/
if (block_group && block_group_bits(block_group, data) &&
- (block_group->cached != BTRFS_CACHE_NO ||
- search_start == ideal_cache_offset)) {
+ block_group->cached != BTRFS_CACHE_NO) {
down_read(&space_info->groups_sem);
if (list_empty(&block_group->list) ||
block_group->ro) {
@@ -5418,44 +5527,13 @@ search:
have_block_group:
cached = block_group_cache_done(block_group);
if (unlikely(!cached)) {
- u64 free_percent;
-
found_uncached_bg = true;
ret = cache_block_group(block_group, trans,
- orig_root, 1);
- if (block_group->cached == BTRFS_CACHE_FINISHED)
- goto alloc;
-
- free_percent = btrfs_block_group_used(&block_group->item);
- free_percent *= 100;
- free_percent = div64_u64(free_percent,
- block_group->key.offset);
- free_percent = 100 - free_percent;
- if (free_percent > ideal_cache_percent &&
- likely(!block_group->ro)) {
- ideal_cache_offset = block_group->key.objectid;
- ideal_cache_percent = free_percent;
- }
-
- /*
- * The caching workers are limited to 2 threads, so we
- * can queue as much work as we care to.
- */
- if (loop > LOOP_FIND_IDEAL) {
- ret = cache_block_group(block_group, trans,
- orig_root, 0);
- BUG_ON(ret);
- }
-
- /*
- * If loop is set for cached only, try the next block
- * group.
- */
- if (loop == LOOP_FIND_IDEAL)
- goto loop;
+ orig_root, 0);
+ BUG_ON(ret < 0);
+ ret = 0;
}
-alloc:
if (unlikely(block_group->ro))
goto loop;
@@ -5606,11 +5684,6 @@ unclustered_alloc:
}
checks:
search_start = stripe_align(root, offset);
- /* move on to the next group */
- if (search_start + num_bytes >= search_end) {
- btrfs_add_free_space(used_block_group, offset, num_bytes);
- goto loop;
- }
/* move on to the next group */
if (search_start + num_bytes >
@@ -5661,9 +5734,7 @@ loop:
if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
goto search;
- /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
- * for them to make caching progress. Also
- * determine the best possible bg to cache
+ /*
* LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
* caching kthreads as we move along
* LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
@@ -5673,50 +5744,17 @@ loop:
*/
if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
index = 0;
- if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
- found_uncached_bg = false;
- loop++;
- if (!ideal_cache_percent)
- goto search;
-
- /*
- * 1 of the following 2 things have happened so far
- *
- * 1) We found an ideal block group for caching that
- * is mostly full and will cache quickly, so we might
- * as well wait for it.
- *
- * 2) We searched for cached only and we didn't find
- * anything, and we didn't start any caching kthreads
- * either, so chances are we will loop through and
- * start a couple caching kthreads, and then come back
- * around and just wait for them. This will be slower
- * because we will have 2 caching kthreads reading at
- * the same time when we could have just started one
- * and waited for it to get far enough to give us an
- * allocation, so go ahead and go to the wait caching
- * loop.
- */
- loop = LOOP_CACHING_WAIT;
- search_start = ideal_cache_offset;
- ideal_cache_percent = 0;
- goto ideal_cache;
- } else if (loop == LOOP_FIND_IDEAL) {
- /*
- * Didn't find a uncached bg, wait on anything we find
- * next.
- */
- loop = LOOP_CACHING_WAIT;
- goto search;
- }
-
loop++;
-
if (loop == LOOP_ALLOC_CHUNK) {
if (allowed_chunk_alloc) {
ret = do_chunk_alloc(trans, root, num_bytes +
2 * 1024 * 1024, data,
CHUNK_ALLOC_LIMITED);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans,
+ root, ret);
+ goto out;
+ }
allowed_chunk_alloc = 0;
if (ret == 1)
done_chunk_alloc = 1;
@@ -5745,6 +5783,7 @@ loop:
} else if (ins->objectid) {
ret = 0;
}
+out:
return ret;
}
@@ -5798,12 +5837,10 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 num_bytes, u64 min_alloc_size,
u64 empty_size, u64 hint_byte,
- u64 search_end, struct btrfs_key *ins,
- u64 data)
+ struct btrfs_key *ins, u64 data)
{
bool final_tried = false;
int ret;
- u64 search_start = 0;
data = btrfs_get_alloc_profile(root, data);
again:
@@ -5811,23 +5848,31 @@ again:
* the only place that sets empty_size is btrfs_realloc_node, which
* is not called recursively on allocations
*/
- if (empty_size || root->ref_cows)
+ if (empty_size || root->ref_cows) {
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
num_bytes + 2 * 1024 * 1024, data,
CHUNK_ALLOC_NO_FORCE);
+ if (ret < 0 && ret != -ENOSPC) {
+ btrfs_abort_transaction(trans, root, ret);
+ return ret;
+ }
+ }
WARN_ON(num_bytes < root->sectorsize);
ret = find_free_extent(trans, root, num_bytes, empty_size,
- search_start, search_end, hint_byte,
- ins, data);
+ hint_byte, ins, data);
if (ret == -ENOSPC) {
if (!final_tried) {
num_bytes = num_bytes >> 1;
num_bytes = num_bytes & ~(root->sectorsize - 1);
num_bytes = max(num_bytes, min_alloc_size);
- do_chunk_alloc(trans, root->fs_info->extent_root,
+ ret = do_chunk_alloc(trans, root->fs_info->extent_root,
num_bytes, data, CHUNK_ALLOC_FORCE);
+ if (ret < 0 && ret != -ENOSPC) {
+ btrfs_abort_transaction(trans, root, ret);
+ return ret;
+ }
if (num_bytes == min_alloc_size)
final_tried = true;
goto again;
@@ -5838,7 +5883,8 @@ again:
printk(KERN_ERR "btrfs allocation failed flags %llu, "
"wanted %llu\n", (unsigned long long)data,
(unsigned long long)num_bytes);
- dump_space_info(sinfo, num_bytes, 1);
+ if (sinfo)
+ dump_space_info(sinfo, num_bytes, 1);
}
}
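
The retry path above halves the request on ENOSPC, keeps it sector aligned, and clamps it to the minimum allocation size before trying once more. A userspace sketch of that backoff; sk_alloc() is a hypothetical allocator that only succeeds for small requests:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static int sk_alloc(uint64_t bytes)
{
	return bytes > 64 * 1024 ? -28 /* ENOSPC */ : 0;
}

static int sk_reserve(uint64_t num_bytes, uint64_t min_alloc, uint64_t sectorsize)
{
	bool final_tried = false;
	int ret;

again:
	ret = sk_alloc(num_bytes);
	if (ret == -28 && !final_tried) {
		num_bytes >>= 1;			/* halve the request */
		num_bytes &= ~(sectorsize - 1);		/* keep it sector aligned */
		if (num_bytes < min_alloc)
			num_bytes = min_alloc;		/* never go below the floor */
		if (num_bytes == min_alloc)
			final_tried = true;		/* last attempt */
		goto again;
	}
	return ret;
}

int main(void)
{
	printf("%d\n", sk_reserve(1024 * 1024, 16 * 1024, 4096));
	return 0;
}
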
@@ -5917,7 +5963,10 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
ins, size);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_free_path(path);
+ return ret;
+ }
leaf = path->nodes[0];
extent_item = btrfs_item_ptr(leaf, path->slots[0],
@@ -5947,7 +5996,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_free_path(path);
ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
- if (ret) {
+ if (ret) { /* -ENOENT, logic error */
printk(KERN_ERR "btrfs update block group failed for %llu "
"%llu\n", (unsigned long long)ins->objectid,
(unsigned long long)ins->offset);
@@ -5978,7 +6027,10 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
ins, size);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_free_path(path);
+ return ret;
+ }
leaf = path->nodes[0];
extent_item = btrfs_item_ptr(leaf, path->slots[0],
@@ -6008,7 +6060,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
btrfs_free_path(path);
ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
- if (ret) {
+ if (ret) { /* -ENOENT, logic error */
printk(KERN_ERR "btrfs update block group failed for %llu "
"%llu\n", (unsigned long long)ins->objectid,
(unsigned long long)ins->offset);
@@ -6056,28 +6108,28 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
if (!caching_ctl) {
BUG_ON(!block_group_cache_done(block_group));
ret = btrfs_remove_free_space(block_group, start, num_bytes);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
} else {
mutex_lock(&caching_ctl->mutex);
if (start >= caching_ctl->progress) {
ret = add_excluded_extent(root, start, num_bytes);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
} else if (start + num_bytes <= caching_ctl->progress) {
ret = btrfs_remove_free_space(block_group,
start, num_bytes);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
} else {
num_bytes = caching_ctl->progress - start;
ret = btrfs_remove_free_space(block_group,
start, num_bytes);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
start = caching_ctl->progress;
num_bytes = ins->objectid + ins->offset -
caching_ctl->progress;
ret = add_excluded_extent(root, start, num_bytes);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
mutex_unlock(&caching_ctl->mutex);
@@ -6086,7 +6138,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
ret = btrfs_update_reserved_bytes(block_group, ins->offset,
RESERVE_ALLOC_NO_ACCOUNT);
- BUG_ON(ret);
+ BUG_ON(ret); /* logic error */
btrfs_put_block_group(block_group);
ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
0, owner, offset, ins, 1);
@@ -6107,6 +6159,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
btrfs_tree_lock(buf);
clean_tree_block(trans, root, buf);
+ clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
btrfs_set_lock_blocking(buf);
btrfs_set_buffer_uptodate(buf);
@@ -6214,7 +6267,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
return ERR_CAST(block_rsv);
ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
- empty_size, hint, (u64)-1, &ins, 0);
+ empty_size, hint, &ins, 0);
if (ret) {
unuse_block_rsv(root->fs_info, block_rsv, blocksize);
return ERR_PTR(ret);
@@ -6222,7 +6275,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
buf = btrfs_init_new_buffer(trans, root, ins.objectid,
blocksize, level);
- BUG_ON(IS_ERR(buf));
+ BUG_ON(IS_ERR(buf)); /* -ENOMEM */
if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
if (parent == 0)
@@ -6234,7 +6287,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
struct btrfs_delayed_extent_op *extent_op;
extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
- BUG_ON(!extent_op);
+ BUG_ON(!extent_op); /* -ENOMEM */
if (key)
memcpy(&extent_op->key, key, sizeof(extent_op->key));
else
@@ -6249,7 +6302,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
ins.offset, parent, root_objectid,
level, BTRFS_ADD_DELAYED_EXTENT,
extent_op, for_cow);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
return buf;
}
@@ -6319,7 +6372,9 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
/* We don't lock the tree block, it's OK to be racy here */
ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
&refs, &flags);
- BUG_ON(ret);
+ /* We don't care about errors in readahead. */
+ if (ret < 0)
+ continue;
BUG_ON(refs == 0);
if (wc->stage == DROP_REFERENCE) {
@@ -6386,7 +6441,9 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
eb->start, eb->len,
&wc->refs[level],
&wc->flags[level]);
- BUG_ON(ret);
+ BUG_ON(ret == -ENOMEM);
+ if (ret)
+ return ret;
BUG_ON(wc->refs[level] == 0);
}
@@ -6405,12 +6462,12 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
if (!(wc->flags[level] & flag)) {
BUG_ON(!path->locks[level]);
ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
eb->len, flag, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
wc->flags[level] |= flag;
}
@@ -6482,7 +6539,11 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
&wc->refs[level - 1],
&wc->flags[level - 1]);
- BUG_ON(ret);
+ if (ret < 0) {
+ btrfs_tree_unlock(next);
+ return ret;
+ }
+
BUG_ON(wc->refs[level - 1] == 0);
*lookup_info = 0;
@@ -6551,7 +6612,7 @@ skip:
ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
root->root_key.objectid, level - 1, 0, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
btrfs_tree_unlock(next);
free_extent_buffer(next);
@@ -6609,7 +6670,10 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
eb->start, eb->len,
&wc->refs[level],
&wc->flags[level]);
- BUG_ON(ret);
+ if (ret < 0) {
+ btrfs_tree_unlock_rw(eb, path->locks[level]);
+ return ret;
+ }
BUG_ON(wc->refs[level] == 0);
if (wc->refs[level] == 1) {
btrfs_tree_unlock_rw(eb, path->locks[level]);
@@ -6629,7 +6693,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
else
ret = btrfs_dec_ref(trans, root, eb, 0,
wc->for_reloc);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
/* make block locked assertion in clean_tree_block happy */
if (!path->locks[level] &&
@@ -6738,7 +6802,7 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
* also make sure backrefs for the shared block and all lower level
* blocks are properly updated.
*/
-void btrfs_drop_snapshot(struct btrfs_root *root,
+int btrfs_drop_snapshot(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, int update_ref,
int for_reloc)
{
@@ -6766,7 +6830,10 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
}
trans = btrfs_start_transaction(tree_root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ goto out_free;
+ }
if (block_rsv)
trans->block_rsv = block_rsv;
@@ -6791,7 +6858,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
path->lowest_level = 0;
if (ret < 0) {
err = ret;
- goto out_free;
+ goto out_end_trans;
}
WARN_ON(ret > 0);
@@ -6811,7 +6878,10 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
path->nodes[level]->len,
&wc->refs[level],
&wc->flags[level]);
- BUG_ON(ret);
+ if (ret < 0) {
+ err = ret;
+ goto out_end_trans;
+ }
BUG_ON(wc->refs[level] == 0);
if (level == root_item->drop_level)
@@ -6862,26 +6932,40 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
ret = btrfs_update_root(trans, tree_root,
&root->root_key,
root_item);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, tree_root, ret);
+ err = ret;
+ goto out_end_trans;
+ }
btrfs_end_transaction_throttle(trans, tree_root);
trans = btrfs_start_transaction(tree_root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ goto out_free;
+ }
if (block_rsv)
trans->block_rsv = block_rsv;
}
}
btrfs_release_path(path);
- BUG_ON(err);
+ if (err)
+ goto out_end_trans;
ret = btrfs_del_root(trans, tree_root, &root->root_key);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, tree_root, ret);
+ goto out_end_trans;
+ }
if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
NULL, NULL);
- BUG_ON(ret < 0);
- if (ret > 0) {
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, tree_root, ret);
+ err = ret;
+ goto out_end_trans;
+ } else if (ret > 0) {
/* if we fail to delete the orphan item this time
* around, it'll get picked up the next time.
*
@@ -6899,14 +6983,15 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
free_extent_buffer(root->commit_root);
kfree(root);
}
-out_free:
+out_end_trans:
btrfs_end_transaction_throttle(trans, tree_root);
+out_free:
kfree(wc);
btrfs_free_path(path);
out:
if (err)
btrfs_std_error(root->fs_info, err);
- return;
+ return err;
}
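
btrfs_drop_snapshot() now returns an error and unwinds through layered labels: out_end_trans ends the transaction, out_free releases the path and walk context, and out reports the error. A compact sketch of the same label ordering, with invented helpers open_txn(), close_txn() and sk_walk():

#include <stdio.h>
#include <stdlib.h>

static int open_txn(void *ctx)   { (void)ctx; return 0; }
static void close_txn(void *ctx) { (void)ctx; }
static int sk_walk(void)         { return 0; }

static int sk_drop(void)
{
	int err = 0;
	char *path = malloc(32);
	char *wc = malloc(32);
	void *txn = NULL;

	if (!path || !wc) {
		err = -12;			/* -ENOMEM */
		goto out_free;			/* nothing else to undo yet */
	}
	err = open_txn(&txn);
	if (err)
		goto out_free;			/* transaction never started */

	err = sk_walk();			/* the actual work */
	if (err)
		goto out_end_trans;

	puts("dropped");			/* skipped on walk failure */

out_end_trans:
	close_txn(&txn);
out_free:
	free(wc);
	free(path);
	if (err)
		fprintf(stderr, "error %d\n", err);
	return err;
}

int main(void)
{
	return sk_drop() ? EXIT_FAILURE : EXIT_SUCCESS;
}
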
/*
@@ -6983,31 +7068,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
u64 num_devices;
- u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
- BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
+ u64 stripped;
- if (root->fs_info->balance_ctl) {
- struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
- u64 tgt = 0;
-
- /* pick restriper's target profile and return */
- if (flags & BTRFS_BLOCK_GROUP_DATA &&
- bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
- tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
- } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
- bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
- tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
- } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
- bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
- tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
- }
-
- if (tgt) {
- /* extended -> chunk profile */
- tgt &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
- return tgt;
- }
- }
+ /*
+	 * if restripe for this chunk_type is on, pick the target profile and
+	 * return; otherwise do the usual balance
+ */
+ stripped = get_restripe_target(root->fs_info, flags);
+ if (stripped)
+ return extended_to_chunk(stripped);
/*
* we add in the count of missing devices because we want
@@ -7017,6 +7086,9 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
num_devices = root->fs_info->fs_devices->rw_devices +
root->fs_info->fs_devices->missing_devices;
+ stripped = BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
+
if (num_devices == 1) {
stripped |= BTRFS_BLOCK_GROUP_DUP;
stripped = flags & ~stripped;
@@ -7029,7 +7101,6 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10))
return stripped | BTRFS_BLOCK_GROUP_DUP;
- return flags;
} else {
/* they already had raid on here, just return */
if (flags & stripped)
@@ -7042,9 +7113,9 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
if (flags & BTRFS_BLOCK_GROUP_DUP)
return stripped | BTRFS_BLOCK_GROUP_RAID1;
- /* turn single device chunks into raid0 */
- return stripped | BTRFS_BLOCK_GROUP_RAID0;
+ /* this is drive concat, leave it alone */
}
+
return flags;
}
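
update_block_group_flags() now asks the restriper for an explicit target first and only falls back to degrading redundancy by device count. The sketch below approximates that decision shape with made-up flag bits and deliberately simplified rules; it is not the exact btrfs policy.

#include <stdint.h>
#include <stdio.h>

#define P_RAID0  (1u << 0)
#define P_RAID1  (1u << 1)
#define P_DUP    (1u << 2)
#define P_RAID10 (1u << 3)

static uint64_t pick_profile(uint64_t flags, uint64_t target, unsigned num_devices)
{
	uint64_t redundant = P_RAID0 | P_RAID1 | P_RAID10;

	if (target)
		return target;				/* restriper decides */

	if (num_devices == 1) {
		/* single device: fall back to DUP or plain single */
		if (flags & (P_RAID1 | P_RAID10))
			return (flags & ~redundant) | P_DUP;
		return flags & ~redundant;
	}
	return flags;					/* enough devices, keep it */
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)pick_profile(P_RAID1, 0, 1));
	return 0;
}
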
@@ -7103,12 +7174,16 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
BUG_ON(cache->ro);
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
alloc_flags = update_block_group_flags(root, cache->flags);
- if (alloc_flags != cache->flags)
- do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
- CHUNK_ALLOC_FORCE);
+ if (alloc_flags != cache->flags) {
+ ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ CHUNK_ALLOC_FORCE);
+ if (ret < 0)
+ goto out;
+ }
ret = set_block_group_ro(cache, 0);
if (!ret)
@@ -7188,7 +7263,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
return free_bytes;
}
-int btrfs_set_block_group_rw(struct btrfs_root *root,
+void btrfs_set_block_group_rw(struct btrfs_root *root,
struct btrfs_block_group_cache *cache)
{
struct btrfs_space_info *sinfo = cache->space_info;
@@ -7204,7 +7279,6 @@ int btrfs_set_block_group_rw(struct btrfs_root *root,
cache->ro = 0;
spin_unlock(&cache->lock);
spin_unlock(&sinfo->lock);
- return 0;
}
/*
@@ -7222,6 +7296,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
u64 min_free;
u64 dev_min = 1;
u64 dev_nr = 0;
+ u64 target;
int index;
int full = 0;
int ret = 0;
@@ -7262,13 +7337,11 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
/*
* ok we don't have enough space, but maybe we have free space on our
* devices to allocate new chunks for relocation, so loop through our
- * alloc devices and guess if we have enough space. However, if we
- * were marked as full, then we know there aren't enough chunks, and we
- * can just return.
+ * alloc devices and guess if we have enough space. if this block
+ * group is going to be restriped, run checks against the target
+ * profile instead of the current one.
*/
ret = -1;
- if (full)
- goto out;
/*
* index:
@@ -7278,7 +7351,20 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
* 3: raid0
* 4: single
*/
- index = get_block_group_index(block_group);
+ target = get_restripe_target(root->fs_info, block_group->flags);
+ if (target) {
+ index = __get_block_group_index(extended_to_chunk(target));
+ } else {
+ /*
+ * this is just a balance, so if we were marked as full
+ * we know there is no space for a new chunk
+ */
+ if (full)
+ goto out;
+
+ index = get_block_group_index(block_group);
+ }
+
if (index == 0) {
dev_min = 4;
/* Divide by 2 */
@@ -7572,7 +7658,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
ret = update_space_info(info, cache->flags, found_key.offset,
btrfs_block_group_used(&cache->item),
&space_info);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
cache->space_info = space_info;
spin_lock(&cache->space_info->lock);
cache->space_info->bytes_readonly += cache->bytes_super;
@@ -7581,7 +7667,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
__link_block_group(space_info, cache);
ret = btrfs_add_block_group_cache(root->fs_info, cache);
- BUG_ON(ret);
+ BUG_ON(ret); /* Logic error */
set_avail_alloc_bits(root->fs_info, cache->flags);
if (btrfs_chunk_readonly(root, cache->key.objectid))
@@ -7663,7 +7749,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
&cache->space_info);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
update_global_block_rsv(root->fs_info);
spin_lock(&cache->space_info->lock);
@@ -7673,11 +7759,14 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
__link_block_group(cache->space_info, cache);
ret = btrfs_add_block_group_cache(root->fs_info, cache);
- BUG_ON(ret);
+ BUG_ON(ret); /* Logic error */
ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
sizeof(cache->item));
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, extent_root, ret);
+ return ret;
+ }
set_avail_alloc_bits(extent_root->fs_info, type);
@@ -7686,11 +7775,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
- u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
- /* chunk -> extended profile */
- if (extra_flags == 0)
- extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+ u64 extra_flags = chunk_to_extended(flags) &
+ BTRFS_EXTENDED_PROFILE_MASK;
if (flags & BTRFS_BLOCK_GROUP_DATA)
fs_info->avail_data_alloc_bits &= ~extra_flags;
@@ -7758,7 +7844,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
inode = lookup_free_space_inode(tree_root, block_group, path);
if (!IS_ERR(inode)) {
ret = btrfs_orphan_add(trans, inode);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_add_delayed_iput(inode);
+ goto out;
+ }
clear_nlink(inode);
/* One for the block groups ref */
spin_lock(&block_group->lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2862454bcdb3..8d904dd7ea9f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -19,6 +19,7 @@
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
+#include "locking.h"
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
@@ -53,6 +54,13 @@ struct extent_page_data {
unsigned int sync_io:1;
};
+static noinline void flush_write_bio(void *data);
+static inline struct btrfs_fs_info *
+tree_fs_info(struct extent_io_tree *tree)
+{
+ return btrfs_sb(tree->mapping->host->i_sb);
+}
+
int __init extent_io_init(void)
{
extent_state_cache = kmem_cache_create("extent_state",
@@ -136,6 +144,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
#endif
atomic_set(&state->refs, 1);
init_waitqueue_head(&state->wq);
+ trace_alloc_extent_state(state, mask, _RET_IP_);
return state;
}
@@ -153,6 +162,7 @@ void free_extent_state(struct extent_state *state)
list_del(&state->leak_list);
spin_unlock_irqrestore(&leak_lock, flags);
#endif
+ trace_free_extent_state(state, _RET_IP_);
kmem_cache_free(extent_state_cache, state);
}
}
@@ -439,6 +449,13 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
return prealloc;
}
+void extent_io_tree_panic(struct extent_io_tree *tree, int err)
+{
+ btrfs_panic(tree_fs_info(tree), err, "Locking error: "
+ "Extent tree was modified by another "
+ "thread while locked.");
+}
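
extent_io_tree_panic() concentrates the "tree changed under a lock we hold" failure in one helper so the call sites below stay one line long. A trivial userspace analogue of that pattern (names invented):

#include <stdio.h>
#include <stdlib.h>

/* One place formats the message and gives up. */
static void tree_corruption_panic(int err)
{
	fprintf(stderr, "fatal: tree modified while locked (err %d)\n", err);
	abort();
}

static void example(int err)
{
	if (err)			/* call sites stay one line long */
		tree_corruption_panic(err);
}

int main(void)
{
	example(0);			/* no error, nothing happens */
	return 0;
}
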
+
/*
* clear some bits on a range in the tree. This may require splitting
* or inserting elements in the tree, so the gfp mask is used to
@@ -449,8 +466,7 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
*
* the range [start, end] is inclusive.
*
- * This takes the tree lock, and returns < 0 on error, > 0 if any of the
- * bits were already set, or zero if none of the bits were already set.
+ * This takes the tree lock, and returns 0 on success and < 0 on error.
*/
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
int bits, int wake, int delete,
@@ -464,7 +480,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
struct rb_node *node;
u64 last_end;
int err;
- int set = 0;
int clear = 0;
if (delete)
@@ -542,12 +557,14 @@ hit_next:
prealloc = alloc_extent_state_atomic(prealloc);
BUG_ON(!prealloc);
err = split_state(tree, state, prealloc, start);
- BUG_ON(err == -EEXIST);
+ if (err)
+ extent_io_tree_panic(tree, err);
+
prealloc = NULL;
if (err)
goto out;
if (state->end <= end) {
- set |= clear_state_bit(tree, state, &bits, wake);
+ clear_state_bit(tree, state, &bits, wake);
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
@@ -564,17 +581,19 @@ hit_next:
prealloc = alloc_extent_state_atomic(prealloc);
BUG_ON(!prealloc);
err = split_state(tree, state, prealloc, end + 1);
- BUG_ON(err == -EEXIST);
+ if (err)
+ extent_io_tree_panic(tree, err);
+
if (wake)
wake_up(&state->wq);
- set |= clear_state_bit(tree, prealloc, &bits, wake);
+ clear_state_bit(tree, prealloc, &bits, wake);
prealloc = NULL;
goto out;
}
- set |= clear_state_bit(tree, state, &bits, wake);
+ clear_state_bit(tree, state, &bits, wake);
next:
if (last_end == (u64)-1)
goto out;
@@ -591,7 +610,7 @@ out:
if (prealloc)
free_extent_state(prealloc);
- return set;
+ return 0;
search_again:
if (start > end)
@@ -602,8 +621,8 @@ search_again:
goto again;
}
-static int wait_on_state(struct extent_io_tree *tree,
- struct extent_state *state)
+static void wait_on_state(struct extent_io_tree *tree,
+ struct extent_state *state)
__releases(tree->lock)
__acquires(tree->lock)
{
@@ -613,7 +632,6 @@ static int wait_on_state(struct extent_io_tree *tree,
schedule();
spin_lock(&tree->lock);
finish_wait(&state->wq, &wait);
- return 0;
}
/*
@@ -621,7 +639,7 @@ static int wait_on_state(struct extent_io_tree *tree,
* The range [start, end] is inclusive.
* The tree lock is taken by this function
*/
-int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
+void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
struct extent_state *state;
struct rb_node *node;
@@ -658,7 +676,6 @@ again:
}
out:
spin_unlock(&tree->lock);
- return 0;
}
static void set_state_bits(struct extent_io_tree *tree,
@@ -706,9 +723,10 @@ static void uncache_state(struct extent_state **cached_ptr)
* [start, end] is inclusive This takes the tree lock.
*/
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int exclusive_bits, u64 *failed_start,
- struct extent_state **cached_state, gfp_t mask)
+static int __must_check
+__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ int bits, int exclusive_bits, u64 *failed_start,
+ struct extent_state **cached_state, gfp_t mask)
{
struct extent_state *state;
struct extent_state *prealloc = NULL;
@@ -742,8 +760,10 @@ again:
prealloc = alloc_extent_state_atomic(prealloc);
BUG_ON(!prealloc);
err = insert_state(tree, prealloc, start, end, &bits);
+ if (err)
+ extent_io_tree_panic(tree, err);
+
prealloc = NULL;
- BUG_ON(err == -EEXIST);
goto out;
}
state = rb_entry(node, struct extent_state, rb_node);
@@ -809,7 +829,9 @@ hit_next:
prealloc = alloc_extent_state_atomic(prealloc);
BUG_ON(!prealloc);
err = split_state(tree, state, prealloc, start);
- BUG_ON(err == -EEXIST);
+ if (err)
+ extent_io_tree_panic(tree, err);
+
prealloc = NULL;
if (err)
goto out;
@@ -846,12 +868,9 @@ hit_next:
*/
err = insert_state(tree, prealloc, start, this_end,
&bits);
- BUG_ON(err == -EEXIST);
- if (err) {
- free_extent_state(prealloc);
- prealloc = NULL;
- goto out;
- }
+ if (err)
+ extent_io_tree_panic(tree, err);
+
cache_state(prealloc, cached_state);
prealloc = NULL;
start = this_end + 1;
@@ -873,7 +892,8 @@ hit_next:
prealloc = alloc_extent_state_atomic(prealloc);
BUG_ON(!prealloc);
err = split_state(tree, state, prealloc, end + 1);
- BUG_ON(err == -EEXIST);
+ if (err)
+ extent_io_tree_panic(tree, err);
set_state_bits(tree, prealloc, &bits);
cache_state(prealloc, cached_state);
@@ -900,6 +920,15 @@ search_again:
goto again;
}
+int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
+ u64 *failed_start, struct extent_state **cached_state,
+ gfp_t mask)
+{
+ return __set_extent_bit(tree, start, end, bits, 0, failed_start,
+ cached_state, mask);
+}
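
The exclusive-bits variant becomes a private, __must_check core while the exported set_extent_bit() is reduced to a wrapper that never passes exclusive bits. A sketch of that split; the names are invented and the attribute is GCC/Clang specific:

#include <stdio.h>

#define SK_MUST_CHECK __attribute__((warn_unused_result))

/* Private core: callers must look at the result. */
static int SK_MUST_CHECK __sk_set_bits(unsigned *word, unsigned bits,
				       unsigned exclusive_bits)
{
	if (*word & exclusive_bits)
		return -17;		/* -EEXIST: someone already holds it */
	*word |= bits;
	return 0;
}

/* Public helper: ordinary callers never pass exclusive bits. */
static int sk_set_bits(unsigned *word, unsigned bits)
{
	return __sk_set_bits(word, bits, 0);
}

int main(void)
{
	unsigned w = 0;
	int a = sk_set_bits(&w, 0x1);
	int b = __sk_set_bits(&w, 0x2, 0x1);	/* conflicts with bit 0x1 */

	printf("%d %d\n", a, b);
	return 0;
}
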
+
+
/**
* convert_extent - convert all bits in a given range from one bit to another
* @tree: the io tree to search
@@ -946,7 +975,8 @@ again:
}
err = insert_state(tree, prealloc, start, end, &bits);
prealloc = NULL;
- BUG_ON(err == -EEXIST);
+ if (err)
+ extent_io_tree_panic(tree, err);
goto out;
}
state = rb_entry(node, struct extent_state, rb_node);
@@ -1002,7 +1032,8 @@ hit_next:
goto out;
}
err = split_state(tree, state, prealloc, start);
- BUG_ON(err == -EEXIST);
+ if (err)
+ extent_io_tree_panic(tree, err);
prealloc = NULL;
if (err)
goto out;
@@ -1041,12 +1072,8 @@ hit_next:
*/
err = insert_state(tree, prealloc, start, this_end,
&bits);
- BUG_ON(err == -EEXIST);
- if (err) {
- free_extent_state(prealloc);
- prealloc = NULL;
- goto out;
- }
+ if (err)
+ extent_io_tree_panic(tree, err);
prealloc = NULL;
start = this_end + 1;
goto search_again;
@@ -1065,7 +1092,8 @@ hit_next:
}
err = split_state(tree, state, prealloc, end + 1);
- BUG_ON(err == -EEXIST);
+ if (err)
+ extent_io_tree_panic(tree, err);
set_state_bits(tree, prealloc, &bits);
clear_state_bit(tree, prealloc, &clear_bits, 0);
@@ -1095,14 +1123,14 @@ search_again:
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
- return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
+ return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
NULL, mask);
}
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
int bits, gfp_t mask)
{
- return set_extent_bit(tree, start, end, bits, 0, NULL,
+ return set_extent_bit(tree, start, end, bits, NULL,
NULL, mask);
}
@@ -1117,7 +1145,7 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
{
return set_extent_bit(tree, start, end,
EXTENT_DELALLOC | EXTENT_UPTODATE,
- 0, NULL, cached_state, mask);
+ NULL, cached_state, mask);
}
int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1131,7 +1159,7 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
- return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
+ return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
NULL, mask);
}
@@ -1139,7 +1167,7 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask)
{
return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
- NULL, cached_state, mask);
+ cached_state, mask);
}
static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1155,42 +1183,40 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
* us if waiting is desired.
*/
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, struct extent_state **cached_state, gfp_t mask)
+ int bits, struct extent_state **cached_state)
{
int err;
u64 failed_start;
while (1) {
- err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
- EXTENT_LOCKED, &failed_start,
- cached_state, mask);
- if (err == -EEXIST && (mask & __GFP_WAIT)) {
+ err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
+ EXTENT_LOCKED, &failed_start,
+ cached_state, GFP_NOFS);
+ if (err == -EEXIST) {
wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
start = failed_start;
- } else {
+ } else
break;
- }
WARN_ON(start > end);
}
return err;
}
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
- return lock_extent_bits(tree, start, end, 0, NULL, mask);
+ return lock_extent_bits(tree, start, end, 0, NULL);
}
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
int err;
u64 failed_start;
- err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
- &failed_start, NULL, mask);
+ err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
+ &failed_start, NULL, GFP_NOFS);
if (err == -EEXIST) {
if (failed_start > start)
clear_extent_bit(tree, start, failed_start - 1,
- EXTENT_LOCKED, 1, 0, NULL, mask);
+ EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
return 0;
}
return 1;
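
try_lock_extent() now tries to set the lock bit without sleeping and, if the range was only partially claimed, clears the prefix it did manage to lock before reporting failure. A toy bitmap version of that trylock-with-rollback idea, purely illustrative and not the extent_io implementation:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define UNITS 16
static bool locked[UNITS];

static bool try_lock_range(int start, int end)
{
	for (int i = start; i < end; i++) {
		if (locked[i]) {
			/* roll back the prefix we already locked */
			for (int j = start; j < i; j++)
				locked[j] = false;
			return false;
		}
		locked[i] = true;
	}
	return true;
}

int main(void)
{
	memset(locked, 0, sizeof(locked));
	locked[5] = true;
	printf("%d\n", try_lock_range(2, 8));	/* fails, 2..4 rolled back */
	printf("%d\n", try_lock_range(0, 4));	/* succeeds */
	return 0;
}
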
@@ -1203,10 +1229,10 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
mask);
}
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
- mask);
+ GFP_NOFS);
}
/*
@@ -1220,7 +1246,7 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
while (index <= end_index) {
page = find_get_page(tree->mapping, index);
- BUG_ON(!page);
+ BUG_ON(!page); /* Pages should be in the extent_io_tree */
set_page_writeback(page);
page_cache_release(page);
index++;
@@ -1343,9 +1369,9 @@ out:
return found;
}
-static noinline int __unlock_for_delalloc(struct inode *inode,
- struct page *locked_page,
- u64 start, u64 end)
+static noinline void __unlock_for_delalloc(struct inode *inode,
+ struct page *locked_page,
+ u64 start, u64 end)
{
int ret;
struct page *pages[16];
@@ -1355,7 +1381,7 @@ static noinline int __unlock_for_delalloc(struct inode *inode,
int i;
if (index == locked_page->index && end_index == index)
- return 0;
+ return;
while (nr_pages > 0) {
ret = find_get_pages_contig(inode->i_mapping, index,
@@ -1370,7 +1396,6 @@ static noinline int __unlock_for_delalloc(struct inode *inode,
index += ret;
cond_resched();
}
- return 0;
}
static noinline int lock_delalloc_pages(struct inode *inode,
@@ -1500,11 +1525,10 @@ again:
goto out_failed;
}
}
- BUG_ON(ret);
+ BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
/* step three, lock the state bits for the whole range */
- lock_extent_bits(tree, delalloc_start, delalloc_end,
- 0, &cached_state, GFP_NOFS);
+ lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
/* then test to make sure it is all still delalloc */
ret = test_range_bit(tree, delalloc_start, delalloc_end,
@@ -1761,39 +1785,34 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
* helper function to set a given page up to date if all the
* extents in the tree for that page are up to date
*/
-static int check_page_uptodate(struct extent_io_tree *tree,
- struct page *page)
+static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
{
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 end = start + PAGE_CACHE_SIZE - 1;
if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
SetPageUptodate(page);
- return 0;
}
/*
* helper function to unlock a page if all the extents in the tree
* for that page are unlocked
*/
-static int check_page_locked(struct extent_io_tree *tree,
- struct page *page)
+static void check_page_locked(struct extent_io_tree *tree, struct page *page)
{
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 end = start + PAGE_CACHE_SIZE - 1;
if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
unlock_page(page);
- return 0;
}
/*
* helper function to end page writeback if all the extents
* in the tree for that page are done with writeback
*/
-static int check_page_writeback(struct extent_io_tree *tree,
- struct page *page)
+static void check_page_writeback(struct extent_io_tree *tree,
+ struct page *page)
{
end_page_writeback(page);
- return 0;
}
/*
@@ -1912,6 +1931,26 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
return 0;
}
+int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
+ int mirror_num)
+{
+ struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+ u64 start = eb->start;
+ unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
+ int ret;
+
+ for (i = 0; i < num_pages; i++) {
+ struct page *p = extent_buffer_page(eb, i);
+ ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
+ start, p, mirror_num);
+ if (ret)
+ break;
+ start += PAGE_CACHE_SIZE;
+ }
+
+ return ret;
+}
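
repair_eb_io_failure() walks the pages backing an extent buffer and resubmits each page-sized chunk, stopping at the first error. A self-contained sketch of that per-chunk loop; repair_chunk() is a made-up stand-in for repair_io_failure():

#include <stdio.h>

#define SK_PAGE_SIZE 4096

/* Hypothetical per-chunk repair; fails on the third chunk for the demo. */
static int repair_chunk(unsigned long long start)
{
	return start == 2 * SK_PAGE_SIZE ? -5 /* -EIO */ : 0;
}

/* Walk a buffer one page at a time, bail out on the first failure. */
static int repair_buffer(unsigned long long start, unsigned long len)
{
	unsigned long num_pages = (len + SK_PAGE_SIZE - 1) / SK_PAGE_SIZE;
	int ret = 0;

	for (unsigned long i = 0; i < num_pages; i++) {
		ret = repair_chunk(start);
		if (ret)
			break;
		start += SK_PAGE_SIZE;
	}
	return ret;
}

int main(void)
{
	printf("%d\n", repair_buffer(0, 4 * SK_PAGE_SIZE));
	return 0;
}
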
+
/*
* each time an IO finishes, we do a fast check in the IO failure tree
* to see if we need to process or clean up an io_failure_record
@@ -2258,6 +2297,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
u64 start;
u64 end;
int whole_page;
+ int failed_mirror;
int ret;
if (err)
@@ -2304,9 +2344,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
else
clean_io_failure(start, page);
}
- if (!uptodate) {
- int failed_mirror;
+
+ if (!uptodate)
failed_mirror = (int)(unsigned long)bio->bi_bdev;
+
+ if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
+ ret = tree->ops->readpage_io_failed_hook(page, failed_mirror);
+ if (!ret && !err &&
+ test_bit(BIO_UPTODATE, &bio->bi_flags))
+ uptodate = 1;
+ } else if (!uptodate) {
/*
* The generic bio_readpage_error handles errors the
* following way: If possible, new read requests are
@@ -2320,7 +2367,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
ret = bio_readpage_error(bio, page, start, end,
failed_mirror, NULL);
if (ret == 0) {
-error_handled:
uptodate =
test_bit(BIO_UPTODATE, &bio->bi_flags);
if (err)
@@ -2328,16 +2374,9 @@ error_handled:
uncache_state(&cached);
continue;
}
- if (tree->ops && tree->ops->readpage_io_failed_hook) {
- ret = tree->ops->readpage_io_failed_hook(
- bio, page, start, end,
- failed_mirror, state);
- if (ret == 0)
- goto error_handled;
- }
}
- if (uptodate) {
+ if (uptodate && tree->track_uptodate) {
set_extent_uptodate(tree, start, end, &cached,
GFP_ATOMIC);
}
@@ -2386,8 +2425,12 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
return bio;
}
-static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
- unsigned long bio_flags)
+/*
+ * Since writes are async, they will only return -ENOMEM.
+ * Reads can return the full range of I/O error conditions.
+ */
+static int __must_check submit_one_bio(int rw, struct bio *bio,
+ int mirror_num, unsigned long bio_flags)
{
int ret = 0;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -2413,6 +2456,19 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
return ret;
}
+static int merge_bio(struct extent_io_tree *tree, struct page *page,
+ unsigned long offset, size_t size, struct bio *bio,
+ unsigned long bio_flags)
+{
+ int ret = 0;
+ if (tree->ops && tree->ops->merge_bio_hook)
+ ret = tree->ops->merge_bio_hook(page, offset, size, bio,
+ bio_flags);
+ BUG_ON(ret < 0);
+ return ret;
+
+}
+
static int submit_extent_page(int rw, struct extent_io_tree *tree,
struct page *page, sector_t sector,
size_t size, unsigned long offset,
@@ -2441,12 +2497,12 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
sector;
if (prev_bio_flags != bio_flags || !contig ||
- (tree->ops && tree->ops->merge_bio_hook &&
- tree->ops->merge_bio_hook(page, offset, page_size, bio,
- bio_flags)) ||
+ merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
bio_add_page(bio, page, page_size, offset) < page_size) {
ret = submit_one_bio(rw, bio, mirror_num,
prev_bio_flags);
+ if (ret < 0)
+ return ret;
bio = NULL;
} else {
return 0;
@@ -2473,25 +2529,31 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
return ret;
}
-void set_page_extent_mapped(struct page *page)
+void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
{
if (!PagePrivate(page)) {
SetPagePrivate(page);
page_cache_get(page);
- set_page_private(page, EXTENT_PAGE_PRIVATE);
+ set_page_private(page, (unsigned long)eb);
+ } else {
+ WARN_ON(page->private != (unsigned long)eb);
}
}
-static void set_page_extent_head(struct page *page, unsigned long len)
+void set_page_extent_mapped(struct page *page)
{
- WARN_ON(!PagePrivate(page));
- set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
+ if (!PagePrivate(page)) {
+ SetPagePrivate(page);
+ page_cache_get(page);
+ set_page_private(page, EXTENT_PAGE_PRIVATE);
+ }
}
/*
* basic readpage implementation. Locked extent state structs are inserted
* into the tree that are removed when the IO is done (by the end_io
* handlers)
+ * XXX JDM: This needs looking at to ensure proper page locking
*/
static int __extent_read_full_page(struct extent_io_tree *tree,
struct page *page,
@@ -2531,11 +2593,11 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
end = page_end;
while (1) {
- lock_extent(tree, start, end, GFP_NOFS);
+ lock_extent(tree, start, end);
ordered = btrfs_lookup_ordered_extent(inode, start);
if (!ordered)
break;
- unlock_extent(tree, start, end, GFP_NOFS);
+ unlock_extent(tree, start, end);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
}
@@ -2572,7 +2634,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
end - cur + 1, 0);
if (IS_ERR_OR_NULL(em)) {
SetPageError(page);
- unlock_extent(tree, cur, end, GFP_NOFS);
+ unlock_extent(tree, cur, end);
break;
}
extent_offset = cur - em->start;
@@ -2624,7 +2686,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
if (test_range_bit(tree, cur, cur_end,
EXTENT_UPTODATE, 1, NULL)) {
check_page_uptodate(tree, page);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+ unlock_extent(tree, cur, cur + iosize - 1);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -2634,7 +2696,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
*/
if (block_start == EXTENT_MAP_INLINE) {
SetPageError(page);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+ unlock_extent(tree, cur, cur + iosize - 1);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -2654,6 +2716,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
end_bio_extent_readpage, mirror_num,
*bio_flags,
this_bio_flag);
+ BUG_ON(ret == -ENOMEM);
nr++;
*bio_flags = this_bio_flag;
}
@@ -2795,7 +2858,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
delalloc_end,
&page_started,
&nr_written);
- BUG_ON(ret);
+ /* File system has been set read-only */
+ if (ret) {
+ SetPageError(page);
+ goto done;
+ }
/*
* delalloc_end is already one less than the total
* length, so we don't subtract one from
@@ -2968,6 +3035,275 @@ done_unlocked:
return 0;
}
+static int eb_wait(void *word)
+{
+ io_schedule();
+ return 0;
+}
+
+static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+{
+ wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
+ TASK_UNINTERRUPTIBLE);
+}
+
+static int lock_extent_buffer_for_io(struct extent_buffer *eb,
+ struct btrfs_fs_info *fs_info,
+ struct extent_page_data *epd)
+{
+ unsigned long i, num_pages;
+ int flush = 0;
+ int ret = 0;
+
+ if (!btrfs_try_tree_write_lock(eb)) {
+ flush = 1;
+ flush_write_bio(epd);
+ btrfs_tree_lock(eb);
+ }
+
+ if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
+ btrfs_tree_unlock(eb);
+ if (!epd->sync_io)
+ return 0;
+ if (!flush) {
+ flush_write_bio(epd);
+ flush = 1;
+ }
+ while (1) {
+ wait_on_extent_buffer_writeback(eb);
+ btrfs_tree_lock(eb);
+ if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
+ break;
+ btrfs_tree_unlock(eb);
+ }
+ }
+
+ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+ set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+ btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+ spin_lock(&fs_info->delalloc_lock);
+ if (fs_info->dirty_metadata_bytes >= eb->len)
+ fs_info->dirty_metadata_bytes -= eb->len;
+ else
+ WARN_ON(1);
+ spin_unlock(&fs_info->delalloc_lock);
+ ret = 1;
+ }
+
+ btrfs_tree_unlock(eb);
+
+ if (!ret)
+ return ret;
+
+ num_pages = num_extent_pages(eb->start, eb->len);
+ for (i = 0; i < num_pages; i++) {
+ struct page *p = extent_buffer_page(eb, i);
+
+ if (!trylock_page(p)) {
+ if (!flush) {
+ flush_write_bio(epd);
+ flush = 1;
+ }
+ lock_page(p);
+ }
+ }
+
+ return ret;
+}
+
+static void end_extent_buffer_writeback(struct extent_buffer *eb)
+{
+ clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
+}
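
The writeback flag acts as a handshake: lock_extent_buffer_for_io() waits for the bit to clear, while end_extent_buffer_writeback() clears it, orders the store, and wakes the waiters. The kernel code uses wait_on_bit()/wake_up_bit(); the sketch below models the same handshake in userspace with a mutex and condition variable (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sk_buffer {
	bool under_writeback;
	pthread_mutex_t lock;
	pthread_cond_t done;
};

static void wait_for_writeback(struct sk_buffer *b)
{
	pthread_mutex_lock(&b->lock);
	while (b->under_writeback)
		pthread_cond_wait(&b->done, &b->lock);
	pthread_mutex_unlock(&b->lock);
}

static void end_writeback(struct sk_buffer *b)
{
	pthread_mutex_lock(&b->lock);
	b->under_writeback = false;
	pthread_cond_broadcast(&b->done);	/* wake every waiter */
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct sk_buffer b = { false, PTHREAD_MUTEX_INITIALIZER,
			       PTHREAD_COND_INITIALIZER };

	end_writeback(&b);
	wait_for_writeback(&b);
	puts("done");
	return 0;
}
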
+
+static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
+{
+ int uptodate = err == 0;
+ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct extent_buffer *eb;
+ int done;
+
+ do {
+ struct page *page = bvec->bv_page;
+
+ bvec--;
+ eb = (struct extent_buffer *)page->private;
+ BUG_ON(!eb);
+ done = atomic_dec_and_test(&eb->io_pages);
+
+ if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
+ set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ ClearPageUptodate(page);
+ SetPageError(page);
+ }
+
+ end_page_writeback(page);
+
+ if (!done)
+ continue;
+
+ end_extent_buffer_writeback(eb);
+ } while (bvec >= bio->bi_io_vec);
+
+ bio_put(bio);
+
+}
+
+static int write_one_eb(struct extent_buffer *eb,
+ struct btrfs_fs_info *fs_info,
+ struct writeback_control *wbc,
+ struct extent_page_data *epd)
+{
+ struct block_device *bdev = fs_info->fs_devices->latest_bdev;
+ u64 offset = eb->start;
+ unsigned long i, num_pages;
+ int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
+ int ret;
+
+ clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ num_pages = num_extent_pages(eb->start, eb->len);
+ atomic_set(&eb->io_pages, num_pages);
+ for (i = 0; i < num_pages; i++) {
+ struct page *p = extent_buffer_page(eb, i);
+
+ clear_page_dirty_for_io(p);
+ set_page_writeback(p);
+ ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
+ PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
+ -1, end_bio_extent_buffer_writepage,
+ 0, 0, 0);
+ if (ret) {
+ set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ SetPageError(p);
+ if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
+ end_extent_buffer_writeback(eb);
+ ret = -EIO;
+ break;
+ }
+ offset += PAGE_CACHE_SIZE;
+ update_nr_written(p, wbc, 1);
+ unlock_page(p);
+ }
+
+ if (unlikely(ret)) {
+ for (; i < num_pages; i++) {
+ struct page *p = extent_buffer_page(eb, i);
+ unlock_page(p);
+ }
+ }
+
+ return ret;
+}
+
+int btree_write_cache_pages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
+ struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
+ struct extent_buffer *eb, *prev_eb = NULL;
+ struct extent_page_data epd = {
+ .bio = NULL,
+ .tree = tree,
+ .extent_locked = 0,
+ .sync_io = wbc->sync_mode == WB_SYNC_ALL,
+ };
+ int ret = 0;
+ int done = 0;
+ int nr_to_write_done = 0;
+ struct pagevec pvec;
+ int nr_pages;
+ pgoff_t index;
+ pgoff_t end; /* Inclusive */
+ int scanned = 0;
+ int tag;
+
+ pagevec_init(&pvec, 0);
+ if (wbc->range_cyclic) {
+ index = mapping->writeback_index; /* Start from prev offset */
+ end = -1;
+ } else {
+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ scanned = 1;
+ }
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ tag = PAGECACHE_TAG_TOWRITE;
+ else
+ tag = PAGECACHE_TAG_DIRTY;
+retry:
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ tag_pages_for_writeback(mapping, index, end);
+ while (!done && !nr_to_write_done && (index <= end) &&
+ (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+ unsigned i;
+
+ scanned = 1;
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ if (!PagePrivate(page))
+ continue;
+
+ if (!wbc->range_cyclic && page->index > end) {
+ done = 1;
+ break;
+ }
+
+ eb = (struct extent_buffer *)page->private;
+ if (!eb) {
+ WARN_ON(1);
+ continue;
+ }
+
+ if (eb == prev_eb)
+ continue;
+
+ if (!atomic_inc_not_zero(&eb->refs)) {
+ WARN_ON(1);
+ continue;
+ }
+
+ prev_eb = eb;
+ ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
+ if (!ret) {
+ free_extent_buffer(eb);
+ continue;
+ }
+
+ ret = write_one_eb(eb, fs_info, wbc, &epd);
+ if (ret) {
+ done = 1;
+ free_extent_buffer(eb);
+ break;
+ }
+ free_extent_buffer(eb);
+
+ /*
+ * the filesystem may choose to bump up nr_to_write.
+ * We have to make sure to honor the new nr_to_write
+ * at any time
+ */
+ nr_to_write_done = wbc->nr_to_write <= 0;
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+ if (!scanned && !done) {
+ /*
+ * We hit the last page and there is more work to be done: wrap
+ * back to the start of the file
+ */
+ scanned = 1;
+ index = 0;
+ goto retry;
+ }
+ flush_write_bio(&epd);
+ return ret;
+}
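
btree_write_cache_pages() scans dirty pages in tagged batches starting from a remembered index and, for cyclic writeback, wraps back to the start once if the scan began mid-range. A small sketch of that batched, wrap-around scan over a toy dirty bitmap (all names and sizes invented):

#include <stdbool.h>
#include <stdio.h>

#define SLOTS  32
#define BATCH   4

static bool dirty[SLOTS];

static int writeback_all(unsigned start)
{
	unsigned index = start;
	int written = 0;
	bool scanned = (start == 0);

retry:
	while (index < SLOTS) {
		unsigned batch = 0;

		for (; index < SLOTS && batch < BATCH; index++) {
			if (!dirty[index])
				continue;
			dirty[index] = false;	/* "write" the slot */
			written++;
			batch++;
		}
	}
	if (!scanned) {				/* wrap to catch earlier slots */
		scanned = true;
		index = 0;
		goto retry;
	}
	return written;
}

int main(void)
{
	dirty[3] = dirty[10] = dirty[20] = true;
	printf("wrote %d\n", writeback_all(8));
	return 0;
}
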
+
/**
* write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
* @mapping: address space structure to write
@@ -3099,10 +3435,14 @@ retry:
static void flush_epd_write_bio(struct extent_page_data *epd)
{
if (epd->bio) {
+ int rw = WRITE;
+ int ret;
+
if (epd->sync_io)
- submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
- else
- submit_one_bio(WRITE, epd->bio, 0, 0);
+ rw = WRITE_SYNC;
+
+ ret = submit_one_bio(rw, epd->bio, 0, 0);
+ BUG_ON(ret < 0); /* -ENOMEM */
epd->bio = NULL;
}
}
@@ -3219,7 +3559,7 @@ int extent_readpages(struct extent_io_tree *tree,
}
BUG_ON(!list_empty(pages));
if (bio)
- submit_one_bio(READ, bio, 0, bio_flags);
+ return submit_one_bio(READ, bio, 0, bio_flags);
return 0;
}
@@ -3240,7 +3580,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
if (start > end)
return 0;
- lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
+ lock_extent_bits(tree, start, end, 0, &cached_state);
wait_on_page_writeback(page);
clear_extent_bit(tree, start, end,
EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -3454,7 +3794,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
- &cached_state, GFP_NOFS);
+ &cached_state);
em = get_extent_skip_holes(inode, start, last_for_get_extent,
get_extent);
@@ -3548,26 +3888,7 @@ out:
inline struct page *extent_buffer_page(struct extent_buffer *eb,
unsigned long i)
{
- struct page *p;
- struct address_space *mapping;
-
- if (i == 0)
- return eb->first_page;
- i += eb->start >> PAGE_CACHE_SHIFT;
- mapping = eb->first_page->mapping;
- if (!mapping)
- return NULL;
-
- /*
- * extent_buffer_page is only called after pinning the page
- * by increasing the reference count. So we know the page must
- * be in the radix tree.
- */
- rcu_read_lock();
- p = radix_tree_lookup(&mapping->page_tree, i);
- rcu_read_unlock();
-
- return p;
+ return eb->pages[i];
}
inline unsigned long num_extent_pages(u64 start, u64 len)
@@ -3576,6 +3897,19 @@ inline unsigned long num_extent_pages(u64 start, u64 len)
(start >> PAGE_CACHE_SHIFT);
}
+static void __free_extent_buffer(struct extent_buffer *eb)
+{
+#if LEAK_DEBUG
+ unsigned long flags;
+ spin_lock_irqsave(&leak_lock, flags);
+ list_del(&eb->leak_list);
+ spin_unlock_irqrestore(&leak_lock, flags);
+#endif
+ if (eb->pages && eb->pages != eb->inline_pages)
+ kfree(eb->pages);
+ kmem_cache_free(extent_buffer_cache, eb);
+}
+
static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
u64 start,
unsigned long len,
@@ -3591,6 +3925,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
return NULL;
eb->start = start;
eb->len = len;
+ eb->tree = tree;
rwlock_init(&eb->lock);
atomic_set(&eb->write_locks, 0);
atomic_set(&eb->read_locks, 0);
@@ -3607,20 +3942,32 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
list_add(&eb->leak_list, &buffers);
spin_unlock_irqrestore(&leak_lock, flags);
#endif
+ spin_lock_init(&eb->refs_lock);
atomic_set(&eb->refs, 1);
+ atomic_set(&eb->io_pages, 0);
+
+ if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
+ struct page **pages;
+ int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+ pages = kzalloc(num_pages, mask);
+ if (!pages) {
+ __free_extent_buffer(eb);
+ return NULL;
+ }
+ eb->pages = pages;
+ } else {
+ eb->pages = eb->inline_pages;
+ }
return eb;
}
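
__alloc_extent_buffer() now keeps the page-pointer array inline for small buffers and heap-allocates it for larger ones, and __free_extent_buffer() only frees the array when it was really allocated. A generic sketch of that small-object optimization with invented sizes and names:

#include <stdio.h>
#include <stdlib.h>

#define INLINE_SLOTS 4

struct sk_buf {
	void **pages;
	void *inline_pages[INLINE_SLOTS];
};

static struct sk_buf *sk_buf_alloc(unsigned num_pages)
{
	struct sk_buf *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;
	if (num_pages > INLINE_SLOTS) {
		b->pages = calloc(num_pages, sizeof(*b->pages));
		if (!b->pages) {
			free(b);
			return NULL;
		}
	} else {
		b->pages = b->inline_pages;	/* no extra allocation */
	}
	return b;
}

static void sk_buf_free(struct sk_buf *b)
{
	if (!b)
		return;
	if (b->pages != b->inline_pages)	/* free only what we allocated */
		free(b->pages);
	free(b);
}

int main(void)
{
	sk_buf_free(sk_buf_alloc(2));
	sk_buf_free(sk_buf_alloc(16));
	puts("ok");
	return 0;
}
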
-static void __free_extent_buffer(struct extent_buffer *eb)
+static int extent_buffer_under_io(struct extent_buffer *eb)
{
-#if LEAK_DEBUG
- unsigned long flags;
- spin_lock_irqsave(&leak_lock, flags);
- list_del(&eb->leak_list);
- spin_unlock_irqrestore(&leak_lock, flags);
-#endif
- kmem_cache_free(extent_buffer_cache, eb);
+ return (atomic_read(&eb->io_pages) ||
+ test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
+ test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
}
/*
@@ -3632,8 +3979,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
unsigned long index;
struct page *page;
- if (!eb->first_page)
- return;
+ BUG_ON(extent_buffer_under_io(eb));
index = num_extent_pages(eb->start, eb->len);
if (start_idx >= index)
@@ -3642,8 +3988,34 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
do {
index--;
page = extent_buffer_page(eb, index);
- if (page)
+ if (page) {
+ spin_lock(&page->mapping->private_lock);
+ /*
+ * We do this since we'll remove the pages after we've
+ * removed the eb from the radix tree, so we could race
+ * and have this page now attached to the new eb. So
+ * only clear page_private if it's still connected to
+ * this eb.
+ */
+ if (PagePrivate(page) &&
+ page->private == (unsigned long)eb) {
+ BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+ BUG_ON(PageDirty(page));
+ BUG_ON(PageWriteback(page));
+ /*
+				 * We need to make sure we haven't been attached
+ * to a new eb.
+ */
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ /* One for the page private */
+ page_cache_release(page);
+ }
+ spin_unlock(&page->mapping->private_lock);
+
+ /* One for when we alloced the page */
page_cache_release(page);
+ }
} while (index != start_idx);
}
@@ -3656,9 +4028,50 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
__free_extent_buffer(eb);
}
+static void check_buffer_tree_ref(struct extent_buffer *eb)
+{
+ /* the ref bit is tricky. We have to make sure it is set
+ * if we have the buffer dirty. Otherwise the
+ * code to free a buffer can end up dropping a dirty
+ * page
+ *
+ * Once the ref bit is set, it won't go away while the
+ * buffer is dirty or in writeback, and it also won't
+ * go away while we have the reference count on the
+ * eb bumped.
+ *
+ * We can't just set the ref bit without bumping the
+ * ref on the eb because free_extent_buffer might
+ * see the ref bit and try to clear it. If this happens
+ * free_extent_buffer might end up dropping our original
+ * ref by mistake and freeing the page before we are able
+ * to add one more ref.
+ *
+ * So bump the ref count first, then set the bit. If someone
+ * beat us to it, drop the ref we added.
+ */
+ if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
+ atomic_inc(&eb->refs);
+ if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+ atomic_dec(&eb->refs);
+ }
+}
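
check_buffer_tree_ref() takes the extra reference before publishing the TREE_REF bit and drops it again if another thread won the race, so the buffer can never be freed between the two steps. A C11-atomics sketch of the same ordering, with invented names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct sk_obj {
	atomic_int refs;
	atomic_bool tree_ref;
};

static void sk_check_tree_ref(struct sk_obj *o)
{
	if (!atomic_load(&o->tree_ref)) {
		atomic_fetch_add(&o->refs, 1);		/* ref first ...    */
		if (atomic_exchange(&o->tree_ref, true))/* ... then publish */
			atomic_fetch_sub(&o->refs, 1);	/* lost the race    */
	}
}

int main(void)
{
	struct sk_obj o = { 1, false };

	sk_check_tree_ref(&o);
	sk_check_tree_ref(&o);		/* second call is a no-op */
	printf("refs=%d tree_ref=%d\n", atomic_load(&o.refs),
	       atomic_load(&o.tree_ref));
	return 0;
}
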
+
+static void mark_extent_buffer_accessed(struct extent_buffer *eb)
+{
+ unsigned long num_pages, i;
+
+ check_buffer_tree_ref(eb);
+
+ num_pages = num_extent_pages(eb->start, eb->len);
+ for (i = 0; i < num_pages; i++) {
+ struct page *p = extent_buffer_page(eb, i);
+ mark_page_accessed(p);
+ }
+}
+
struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
- u64 start, unsigned long len,
- struct page *page0)
+ u64 start, unsigned long len)
{
unsigned long num_pages = num_extent_pages(start, len);
unsigned long i;
@@ -3674,7 +4087,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
if (eb && atomic_inc_not_zero(&eb->refs)) {
rcu_read_unlock();
- mark_page_accessed(eb->first_page);
+ mark_extent_buffer_accessed(eb);
return eb;
}
rcu_read_unlock();
@@ -3683,32 +4096,43 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
if (!eb)
return NULL;
- if (page0) {
- eb->first_page = page0;
- i = 1;
- index++;
- page_cache_get(page0);
- mark_page_accessed(page0);
- set_page_extent_mapped(page0);
- set_page_extent_head(page0, len);
- uptodate = PageUptodate(page0);
- } else {
- i = 0;
- }
- for (; i < num_pages; i++, index++) {
+ for (i = 0; i < num_pages; i++, index++) {
p = find_or_create_page(mapping, index, GFP_NOFS);
if (!p) {
WARN_ON(1);
goto free_eb;
}
- set_page_extent_mapped(p);
- mark_page_accessed(p);
- if (i == 0) {
- eb->first_page = p;
- set_page_extent_head(p, len);
- } else {
- set_page_private(p, EXTENT_PAGE_PRIVATE);
+
+ spin_lock(&mapping->private_lock);
+ if (PagePrivate(p)) {
+ /*
+ * We could have already allocated an eb for this page
+ * and attached one, so let's see if we can get a ref on
+ * the existing eb. If we can, we know it's good and we
+ * can just return that one; otherwise we know we can
+ * safely overwrite page->private.
+ */
+ exists = (struct extent_buffer *)p->private;
+ if (atomic_inc_not_zero(&exists->refs)) {
+ spin_unlock(&mapping->private_lock);
+ unlock_page(p);
+ mark_extent_buffer_accessed(exists);
+ goto free_eb;
+ }
+
+ /*
+ * Do this so attach_extent_buffer_page() doesn't complain,
+ * and drop the ref the old eb held on this page.
+ */
+ ClearPagePrivate(p);
+ WARN_ON(PageDirty(p));
+ page_cache_release(p);
}
+ attach_extent_buffer_page(eb, p);
+ spin_unlock(&mapping->private_lock);
+ WARN_ON(PageDirty(p));
+ mark_page_accessed(p);
+ eb->pages[i] = p;
if (!PageUptodate(p))
uptodate = 0;
@@ -3716,12 +4140,10 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
* see below about how we avoid a nasty race with release page
* and why we unlock later
*/
- if (i != 0)
- unlock_page(p);
}
if (uptodate)
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-
+again:
ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
if (ret)
goto free_eb;
@@ -3731,14 +4153,21 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
if (ret == -EEXIST) {
exists = radix_tree_lookup(&tree->buffer,
start >> PAGE_CACHE_SHIFT);
- /* add one reference for the caller */
- atomic_inc(&exists->refs);
+ if (!atomic_inc_not_zero(&exists->refs)) {
+ spin_unlock(&tree->buffer_lock);
+ radix_tree_preload_end();
+ exists = NULL;
+ goto again;
+ }
spin_unlock(&tree->buffer_lock);
radix_tree_preload_end();
+ mark_extent_buffer_accessed(exists);
goto free_eb;
}
/* add one reference for the tree */
- atomic_inc(&eb->refs);
+ spin_lock(&eb->refs_lock);
+ check_buffer_tree_ref(eb);
+ spin_unlock(&eb->refs_lock);
spin_unlock(&tree->buffer_lock);
radix_tree_preload_end();
@@ -3751,15 +4180,20 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
* after the extent buffer is in the radix tree so
* it doesn't get lost
*/
- set_page_extent_mapped(eb->first_page);
- set_page_extent_head(eb->first_page, eb->len);
- if (!page0)
- unlock_page(eb->first_page);
+ SetPageChecked(eb->pages[0]);
+ for (i = 1; i < num_pages; i++) {
+ p = extent_buffer_page(eb, i);
+ ClearPageChecked(p);
+ unlock_page(p);
+ }
+ unlock_page(eb->pages[0]);
return eb;
free_eb:
- if (eb->first_page && !page0)
- unlock_page(eb->first_page);
+ for (i = 0; i < num_pages; i++) {
+ if (eb->pages[i])
+ unlock_page(eb->pages[i]);
+ }
if (!atomic_dec_and_test(&eb->refs))
return exists;
@@ -3776,7 +4210,7 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
if (eb && atomic_inc_not_zero(&eb->refs)) {
rcu_read_unlock();
- mark_page_accessed(eb->first_page);
+ mark_extent_buffer_accessed(eb);
return eb;
}
rcu_read_unlock();
@@ -3784,19 +4218,71 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
return NULL;
}
+static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
+{
+ struct extent_buffer *eb =
+ container_of(head, struct extent_buffer, rcu_head);
+
+ __free_extent_buffer(eb);
+}
+
+/* Expects to have eb->refs_lock already held */
+static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
+{
+ WARN_ON(atomic_read(&eb->refs) == 0);
+ if (atomic_dec_and_test(&eb->refs)) {
+ struct extent_io_tree *tree = eb->tree;
+
+ spin_unlock(&eb->refs_lock);
+
+ spin_lock(&tree->buffer_lock);
+ radix_tree_delete(&tree->buffer,
+ eb->start >> PAGE_CACHE_SHIFT);
+ spin_unlock(&tree->buffer_lock);
+
+ /* Should be safe to release our pages at this point */
+ btrfs_release_extent_buffer_page(eb, 0);
+
+ call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
+ return;
+ }
+ spin_unlock(&eb->refs_lock);
+}
+
void free_extent_buffer(struct extent_buffer *eb)
{
if (!eb)
return;
- if (!atomic_dec_and_test(&eb->refs))
+ spin_lock(&eb->refs_lock);
+ if (atomic_read(&eb->refs) == 2 &&
+ test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
+ !extent_buffer_under_io(eb) &&
+ test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+ atomic_dec(&eb->refs);
+
+ /*
+ * I know this is terrible, but it's temporary until we stop tracking
+ * the uptodate bits and such for the extent buffers.
+ */
+ release_extent_buffer(eb, GFP_ATOMIC);
+}
+
+void free_extent_buffer_stale(struct extent_buffer *eb)
+{
+ if (!eb)
return;
- WARN_ON(1);
+ spin_lock(&eb->refs_lock);
+ set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
+
+ if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
+ test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+ atomic_dec(&eb->refs);
+ release_extent_buffer(eb, GFP_NOFS);
}
-int clear_extent_buffer_dirty(struct extent_io_tree *tree,
- struct extent_buffer *eb)
+void clear_extent_buffer_dirty(struct extent_buffer *eb)
{
unsigned long i;
unsigned long num_pages;
@@ -3812,10 +4298,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
lock_page(page);
WARN_ON(!PagePrivate(page));
- set_page_extent_mapped(page);
- if (i == 0)
- set_page_extent_head(page, eb->len);
-
clear_page_dirty_for_io(page);
spin_lock_irq(&page->mapping->tree_lock);
if (!PageDirty(page)) {
@@ -3827,24 +4309,29 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
ClearPageError(page);
unlock_page(page);
}
- return 0;
+ WARN_ON(atomic_read(&eb->refs) == 0);
}
-int set_extent_buffer_dirty(struct extent_io_tree *tree,
- struct extent_buffer *eb)
+int set_extent_buffer_dirty(struct extent_buffer *eb)
{
unsigned long i;
unsigned long num_pages;
int was_dirty = 0;
+ check_buffer_tree_ref(eb);
+
was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
+
num_pages = num_extent_pages(eb->start, eb->len);
+ WARN_ON(atomic_read(&eb->refs) == 0);
+ WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
+
for (i = 0; i < num_pages; i++)
- __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
+ set_page_dirty(extent_buffer_page(eb, i));
return was_dirty;
}
-static int __eb_straddles_pages(u64 start, u64 len)
+static int range_straddles_pages(u64 start, u64 len)
{
if (len < PAGE_CACHE_SIZE)
return 1;
@@ -3855,25 +4342,14 @@ static int __eb_straddles_pages(u64 start, u64 len)
return 0;
}
-static int eb_straddles_pages(struct extent_buffer *eb)
-{
- return __eb_straddles_pages(eb->start, eb->len);
-}
-
-int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb,
- struct extent_state **cached_state)
+int clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
unsigned long i;
struct page *page;
unsigned long num_pages;
- num_pages = num_extent_pages(eb->start, eb->len);
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-
- clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- cached_state, GFP_NOFS);
-
+ num_pages = num_extent_pages(eb->start, eb->len);
for (i = 0; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
if (page)
@@ -3882,27 +4358,16 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
return 0;
}
-int set_extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb)
+int set_extent_buffer_uptodate(struct extent_buffer *eb)
{
unsigned long i;
struct page *page;
unsigned long num_pages;
+ set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
num_pages = num_extent_pages(eb->start, eb->len);
-
- if (eb_straddles_pages(eb)) {
- set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- NULL, GFP_NOFS);
- }
for (i = 0; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
- if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
- ((i == num_pages - 1) &&
- ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
- check_page_uptodate(tree, page);
- continue;
- }
SetPageUptodate(page);
}
return 0;
@@ -3917,7 +4382,7 @@ int extent_range_uptodate(struct extent_io_tree *tree,
int uptodate;
unsigned long index;
- if (__eb_straddles_pages(start, end - start + 1)) {
+ if (range_straddles_pages(start, end - start + 1)) {
ret = test_range_bit(tree, start, end,
EXTENT_UPTODATE, 1, NULL);
if (ret)
@@ -3939,35 +4404,9 @@ int extent_range_uptodate(struct extent_io_tree *tree,
return pg_uptodate;
}
-int extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb,
- struct extent_state *cached_state)
+int extent_buffer_uptodate(struct extent_buffer *eb)
{
- int ret = 0;
- unsigned long num_pages;
- unsigned long i;
- struct page *page;
- int pg_uptodate = 1;
-
- if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
- return 1;
-
- if (eb_straddles_pages(eb)) {
- ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
- EXTENT_UPTODATE, 1, cached_state);
- if (ret)
- return ret;
- }
-
- num_pages = num_extent_pages(eb->start, eb->len);
- for (i = 0; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- if (!PageUptodate(page)) {
- pg_uptodate = 0;
- break;
- }
- }
- return pg_uptodate;
+ return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}
int read_extent_buffer_pages(struct extent_io_tree *tree,
@@ -3981,21 +4420,14 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
int ret = 0;
int locked_pages = 0;
int all_uptodate = 1;
- int inc_all_pages = 0;
unsigned long num_pages;
+ unsigned long num_reads = 0;
struct bio *bio = NULL;
unsigned long bio_flags = 0;
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
- if (eb_straddles_pages(eb)) {
- if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
- EXTENT_UPTODATE, 1, NULL)) {
- return 0;
- }
- }
-
if (start) {
WARN_ON(start < eb->start);
start_i = (start >> PAGE_CACHE_SHIFT) -
@@ -4014,8 +4446,10 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
lock_page(page);
}
locked_pages++;
- if (!PageUptodate(page))
+ if (!PageUptodate(page)) {
+ num_reads++;
all_uptodate = 0;
+ }
}
if (all_uptodate) {
if (start_i == 0)
@@ -4023,20 +4457,12 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
goto unlock_exit;
}
+ clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ eb->failed_mirror = 0;
+ atomic_set(&eb->io_pages, num_reads);
for (i = start_i; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
-
- WARN_ON(!PagePrivate(page));
-
- set_page_extent_mapped(page);
- if (i == 0)
- set_page_extent_head(page, eb->len);
-
- if (inc_all_pages)
- page_cache_get(page);
if (!PageUptodate(page)) {
- if (start_i == 0)
- inc_all_pages = 1;
ClearPageError(page);
err = __extent_read_full_page(tree, page,
get_extent, &bio,
@@ -4048,8 +4474,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
}
}
- if (bio)
- submit_one_bio(READ, bio, mirror_num, bio_flags);
+ if (bio) {
+ err = submit_one_bio(READ, bio, mirror_num, bio_flags);
+ if (err)
+ return err;
+ }
if (ret || wait != WAIT_COMPLETE)
return ret;
@@ -4061,8 +4490,6 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
ret = -EIO;
}
- if (!ret)
- set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
return ret;
unlock_exit:
@@ -4304,15 +4731,20 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
{
char *dst_kaddr = page_address(dst_page);
char *src_kaddr;
+ int must_memmove = 0;
if (dst_page != src_page) {
src_kaddr = page_address(src_page);
} else {
src_kaddr = dst_kaddr;
- BUG_ON(areas_overlap(src_off, dst_off, len));
+ if (areas_overlap(src_off, dst_off, len))
+ must_memmove = 1;
}
- memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
+ if (must_memmove)
+ memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
+ else
+ memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
}
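For reference, the reason copy_pages() above has to fall back to memmove() is that memcpy() is undefined for overlapping source and destination ranges, which can happen when both offsets land in the same page. A tiny standalone C illustration (plain userspace buffers, not btrfs code):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char page[16] = "abcdefghij";

		/* Shift the 10 bytes right by 2 within the same buffer:
		 * the ranges overlap, so memmove() is required here. */
		memmove(page + 2, page, 10);
		page[12] = '\0';
		printf("%s\n", page);	/* prints "ababcdefghij" */
		return 0;
	}

With memcpy() the overlapping copy above would be undefined behaviour, which is exactly the case the must_memmove check guards against.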
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
@@ -4382,7 +4814,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
"len %lu len %lu\n", dst_offset, len, dst->len);
BUG_ON(1);
}
- if (!areas_overlap(src_offset, dst_offset, len)) {
+ if (dst_offset < src_offset) {
memcpy_extent_buffer(dst, dst_offset, src_offset, len);
return;
}
@@ -4408,47 +4840,48 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
}
}
-static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
+int try_release_extent_buffer(struct page *page, gfp_t mask)
{
- struct extent_buffer *eb =
- container_of(head, struct extent_buffer, rcu_head);
-
- btrfs_release_extent_buffer(eb);
-}
-
-int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
-{
- u64 start = page_offset(page);
struct extent_buffer *eb;
- int ret = 1;
- spin_lock(&tree->buffer_lock);
- eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
- if (!eb) {
- spin_unlock(&tree->buffer_lock);
- return ret;
+ /*
+ * We need to make sure nobody is attaching this page to an eb right
+ * now.
+ */
+ spin_lock(&page->mapping->private_lock);
+ if (!PagePrivate(page)) {
+ spin_unlock(&page->mapping->private_lock);
+ return 1;
}
- if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
- ret = 0;
- goto out;
- }
+ eb = (struct extent_buffer *)page->private;
+ BUG_ON(!eb);
/*
- * set @eb->refs to 0 if it is already 1, and then release the @eb.
- * Or go back.
+ * This is a little awful but should be OK; we need to make sure that
+ * the eb doesn't disappear out from under us while we're looking at
+ * this page.
*/
- if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
- ret = 0;
- goto out;
+ spin_lock(&eb->refs_lock);
+ if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+ spin_unlock(&eb->refs_lock);
+ spin_unlock(&page->mapping->private_lock);
+ return 0;
}
+ spin_unlock(&page->mapping->private_lock);
- radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
-out:
- spin_unlock(&tree->buffer_lock);
+ if ((mask & GFP_NOFS) == GFP_NOFS)
+ mask = GFP_NOFS;
- /* at this point we can safely release the extent buffer */
- if (atomic_read(&eb->refs) == 0)
- call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
- return ret;
+ /*
+ * If the tree ref isn't set then we know the ref on this eb is a real ref,
+ * so just return; this page will likely be freed soon anyway.
+ */
+ if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
+ spin_unlock(&eb->refs_lock);
+ return 0;
+ }
+ release_extent_buffer(eb, mask);
+
+ return 1;
}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index cecc3518c121..faf10eb57f75 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -35,6 +35,10 @@
#define EXTENT_BUFFER_DIRTY 2
#define EXTENT_BUFFER_CORRUPT 3
#define EXTENT_BUFFER_READAHEAD 4 /* this got triggered by readahead */
+#define EXTENT_BUFFER_TREE_REF 5
+#define EXTENT_BUFFER_STALE 6
+#define EXTENT_BUFFER_WRITEBACK 7
+#define EXTENT_BUFFER_IOERR 8
/* these are flags for extent_clear_unlock_delalloc */
#define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -54,6 +58,7 @@
#define EXTENT_PAGE_PRIVATE_FIRST_PAGE 3
struct extent_state;
+struct btrfs_root;
typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
struct bio *bio, int mirror_num,
@@ -69,9 +74,7 @@ struct extent_io_ops {
size_t size, struct bio *bio,
unsigned long bio_flags);
int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
- u64 start, u64 end, int failed_mirror,
- struct extent_state *state);
+ int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
u64 start, u64 end,
struct extent_state *state);
@@ -97,6 +100,7 @@ struct extent_io_tree {
struct radix_tree_root buffer;
struct address_space *mapping;
u64 dirty_bytes;
+ int track_uptodate;
spinlock_t lock;
spinlock_t buffer_lock;
struct extent_io_ops *ops;
@@ -119,16 +123,21 @@ struct extent_state {
struct list_head leak_list;
};
+#define INLINE_EXTENT_BUFFER_PAGES 16
+#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE)
struct extent_buffer {
u64 start;
unsigned long len;
unsigned long map_start;
unsigned long map_len;
- struct page *first_page;
unsigned long bflags;
+ struct extent_io_tree *tree;
+ spinlock_t refs_lock;
+ atomic_t refs;
+ atomic_t io_pages;
+ int failed_mirror;
struct list_head leak_list;
struct rcu_head rcu_head;
- atomic_t refs;
pid_t lock_owner;
/* count of read lock holders on the extent buffer */
@@ -152,6 +161,9 @@ struct extent_buffer {
* to unlock
*/
wait_queue_head_t read_lock_wq;
+ wait_queue_head_t lock_wq;
+ struct page *inline_pages[INLINE_EXTENT_BUFFER_PAGES];
+ struct page **pages;
};
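With the usual 4 KiB page size these two defines mean an extent buffer of up to 64 KiB keeps its page pointers in the embedded inline_pages[] array, while anything larger needs the separately allocated pages array seen in the allocation path earlier in this patch. A small standalone sketch of that sizing rule, assuming 4 KiB pages (the constants below are local stand-ins, not the kernel macros):

	#include <stdio.h>

	#define PAGE_SIZE_ASSUMED	4096UL
	#define INLINE_PAGES		16
	#define MAX_INLINE_SIZE		(INLINE_PAGES * PAGE_SIZE_ASSUMED)

	int main(void)
	{
		unsigned long lens[] = { 4096, 16384, 65536, 131072 };

		for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
			unsigned long pages =
				(lens[i] + PAGE_SIZE_ASSUMED - 1) / PAGE_SIZE_ASSUMED;
			printf("len %6lu -> %2lu pages, %s\n", lens[i], pages,
			       lens[i] > MAX_INLINE_SIZE ? "heap array" : "inline array");
		}
		return 0;
	}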
static inline void extent_set_compress_type(unsigned long *bio_flags,
@@ -178,18 +190,17 @@ void extent_io_tree_init(struct extent_io_tree *tree,
int try_release_extent_mapping(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask);
-int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page);
+int try_release_extent_buffer(struct page *page, gfp_t mask);
int try_release_extent_state(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask);
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, struct extent_state **cached, gfp_t mask);
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+ int bits, struct extent_state **cached);
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached, gfp_t mask);
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask);
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
@@ -210,7 +221,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
int bits, gfp_t mask);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int exclusive_bits, u64 *failed_start,
+ int bits, u64 *failed_start,
struct extent_state **cached_state, gfp_t mask);
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask);
@@ -240,6 +251,8 @@ int extent_writepages(struct extent_io_tree *tree,
struct address_space *mapping,
get_extent_t *get_extent,
struct writeback_control *wbc);
+int btree_write_cache_pages(struct address_space *mapping,
+ struct writeback_control *wbc);
int extent_readpages(struct extent_io_tree *tree,
struct address_space *mapping,
struct list_head *pages, unsigned nr_pages,
@@ -251,11 +264,11 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
void set_page_extent_mapped(struct page *page);
struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
- u64 start, unsigned long len,
- struct page *page0);
+ u64 start, unsigned long len);
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len);
void free_extent_buffer(struct extent_buffer *eb);
+void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE 0
#define WAIT_COMPLETE 1
#define WAIT_PAGE_LOCK 2
@@ -287,19 +300,12 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len);
void memset_extent_buffer(struct extent_buffer *eb, char c,
unsigned long start, unsigned long len);
-int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
-int clear_extent_buffer_dirty(struct extent_io_tree *tree,
- struct extent_buffer *eb);
-int set_extent_buffer_dirty(struct extent_io_tree *tree,
- struct extent_buffer *eb);
-int set_extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb);
-int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb,
- struct extent_state **cached_state);
-int extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb,
- struct extent_state *cached_state);
+void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
+void clear_extent_buffer_dirty(struct extent_buffer *eb);
+int set_extent_buffer_dirty(struct extent_buffer *eb);
+int set_extent_buffer_uptodate(struct extent_buffer *eb);
+int clear_extent_buffer_uptodate(struct extent_buffer *eb);
+int extent_buffer_uptodate(struct extent_buffer *eb);
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
unsigned long min_len, char **map,
unsigned long *map_start,
@@ -320,4 +326,6 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
u64 length, u64 logical, struct page *page,
int mirror_num);
int end_extent_writepage(struct page *page, int err, u64 start, u64 end);
+int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
+ int mirror_num);
#endif
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 078b4fd54500..5d158d320233 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -25,10 +25,12 @@
#include "transaction.h"
#include "print-tree.h"
-#define MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
+#define __MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
sizeof(struct btrfs_item) * 2) / \
size) - 1))
+#define MAX_CSUM_ITEMS(r, size) (min(__MAX_CSUM_ITEMS(r, size), PAGE_CACHE_SIZE))
+
#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
sizeof(struct btrfs_ordered_sum)) / \
sizeof(struct btrfs_sector_sum) * \
@@ -59,7 +61,7 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
sizeof(*item));
if (ret < 0)
goto out;
- BUG_ON(ret);
+ BUG_ON(ret); /* Can't happen */
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
@@ -284,6 +286,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct btrfs_ordered_sum *sums;
struct btrfs_sector_sum *sector_sum;
struct btrfs_csum_item *item;
+ LIST_HEAD(tmplist);
unsigned long offset;
int ret;
size_t size;
@@ -358,7 +361,10 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
MAX_ORDERED_SUM_BYTES(root));
sums = kzalloc(btrfs_ordered_sum_size(root, size),
GFP_NOFS);
- BUG_ON(!sums);
+ if (!sums) {
+ ret = -ENOMEM;
+ goto fail;
+ }
sector_sum = sums->sums;
sums->bytenr = start;
@@ -380,12 +386,19 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
offset += csum_size;
sector_sum++;
}
- list_add_tail(&sums->list, list);
+ list_add_tail(&sums->list, &tmplist);
}
path->slots[0]++;
}
ret = 0;
fail:
+ while (ret < 0 && !list_empty(&tmplist)) {
+ sums = list_first_entry(&tmplist, struct btrfs_ordered_sum, list);
+ list_del(&sums->list);
+ kfree(sums);
+ }
+ list_splice_tail(&tmplist, list);
+
btrfs_free_path(path);
return ret;
}
@@ -420,7 +433,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
offset = page_offset(bvec->bv_page) + bvec->bv_offset;
ordered = btrfs_lookup_ordered_extent(inode, offset);
- BUG_ON(!ordered);
+ BUG_ON(!ordered); /* Logic error */
sums->bytenr = ordered->start;
while (bio_index < bio->bi_vcnt) {
@@ -439,11 +452,11 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
GFP_NOFS);
- BUG_ON(!sums);
+ BUG_ON(!sums); /* -ENOMEM */
sector_sum = sums->sums;
sums->len = bytes_left;
ordered = btrfs_lookup_ordered_extent(inode, offset);
- BUG_ON(!ordered);
+ BUG_ON(!ordered); /* Logic error */
sums->bytenr = ordered->start;
}
@@ -483,18 +496,17 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
* This calls btrfs_truncate_item with the correct args based on the
* overlap, and fixes up the key as required.
*/
-static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *key,
- u64 bytenr, u64 len)
+static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *key,
+ u64 bytenr, u64 len)
{
struct extent_buffer *leaf;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
u64 csum_end;
u64 end_byte = bytenr + len;
u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
- int ret;
leaf = path->nodes[0];
csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
@@ -510,7 +522,7 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
*/
u32 new_size = (bytenr - key->offset) >> blocksize_bits;
new_size *= csum_size;
- ret = btrfs_truncate_item(trans, root, path, new_size, 1);
+ btrfs_truncate_item(trans, root, path, new_size, 1);
} else if (key->offset >= bytenr && csum_end > end_byte &&
end_byte > key->offset) {
/*
@@ -522,15 +534,13 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
u32 new_size = (csum_end - end_byte) >> blocksize_bits;
new_size *= csum_size;
- ret = btrfs_truncate_item(trans, root, path, new_size, 0);
+ btrfs_truncate_item(trans, root, path, new_size, 0);
key->offset = end_byte;
- ret = btrfs_set_item_key_safe(trans, root, path, key);
- BUG_ON(ret);
+ btrfs_set_item_key_safe(trans, root, path, key);
} else {
BUG();
}
- return 0;
}
/*
@@ -635,13 +645,14 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
* item changed size or key
*/
ret = btrfs_split_item(trans, root, path, &key, offset);
- BUG_ON(ret && ret != -EAGAIN);
+ if (ret && ret != -EAGAIN) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
key.offset = end_byte - 1;
} else {
- ret = truncate_one_csum(trans, root, path,
- &key, bytenr, len);
- BUG_ON(ret);
+ truncate_one_csum(trans, root, path, &key, bytenr, len);
if (key.offset < bytenr)
break;
}
@@ -772,7 +783,7 @@ again:
if (diff != csum_size)
goto insert;
- ret = btrfs_extend_item(trans, root, path, diff);
+ btrfs_extend_item(trans, root, path, diff);
goto csum;
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e8d06b6b9194..d83260d7498f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -452,7 +452,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
split = alloc_extent_map();
if (!split2)
split2 = alloc_extent_map();
- BUG_ON(!split || !split2);
+ BUG_ON(!split || !split2); /* -ENOMEM */
write_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
@@ -494,7 +494,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
split->flags = flags;
split->compress_type = em->compress_type;
ret = add_extent_mapping(em_tree, split);
- BUG_ON(ret);
+ BUG_ON(ret); /* Logic error */
free_extent_map(split);
split = split2;
split2 = NULL;
@@ -520,7 +520,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
}
ret = add_extent_mapping(em_tree, split);
- BUG_ON(ret);
+ BUG_ON(ret); /* Logic error */
free_extent_map(split);
split = NULL;
}
@@ -679,7 +679,7 @@ next_slot:
root->root_key.objectid,
new_key.objectid,
start - extent_offset, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
*hint_byte = disk_bytenr;
}
key.offset = start;
@@ -754,7 +754,7 @@ next_slot:
root->root_key.objectid,
key.objectid, key.offset -
extent_offset, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
inode_sub_bytes(inode,
extent_end - key.offset);
*hint_byte = disk_bytenr;
@@ -770,7 +770,10 @@ next_slot:
ret = btrfs_del_items(trans, root, path, del_slot,
del_nr);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
del_nr = 0;
del_slot = 0;
@@ -782,11 +785,13 @@ next_slot:
BUG_ON(1);
}
- if (del_nr > 0) {
+ if (!ret && del_nr > 0) {
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
- BUG_ON(ret);
+ if (ret)
+ btrfs_abort_transaction(trans, root, ret);
}
+out:
btrfs_free_path(path);
return ret;
}
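Much of this patch replaces BUG_ON(ret) with the pattern shown in this hunk: record the first error, abort the transaction, and unwind through a single out: label so cleanup still runs. A simplified, standalone model of that shape (all names here are invented stand-ins, not the btrfs API):

	#include <stdio.h>
	#include <stdlib.h>

	struct txn { int aborted; };

	static void txn_abort(struct txn *t, int err)
	{
		if (!t->aborted) {
			t->aborted = err;
			fprintf(stderr, "transaction aborted: %d\n", err);
		}
	}

	static int step(int fail) { return fail ? -5 : 0; }

	static int do_update(struct txn *t, int fail_second_step)
	{
		int ret;
		char *path = malloc(64);	/* stand-in for btrfs_alloc_path() */

		if (!path)
			return -12;		/* -ENOMEM */

		ret = step(0);
		if (ret) {
			txn_abort(t, ret);
			goto out;
		}
		ret = step(fail_second_step);
		if (ret) {
			txn_abort(t, ret);
			goto out;
		}
	out:
		free(path);			/* cleanup runs on every exit path */
		return ret;
	}

	int main(void)
	{
		struct txn t = { 0 };

		printf("ok run:   %d\n", do_update(&t, 0));
		printf("fail run: %d\n", do_update(&t, 1));
		return 0;
	}

The error is propagated to the caller instead of crashing the machine, while the aborted flag keeps later operations in the same transaction from making things worse.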
@@ -944,7 +949,10 @@ again:
btrfs_release_path(path);
goto again;
}
- BUG_ON(ret < 0);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
leaf = path->nodes[0];
fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
@@ -963,7 +971,7 @@ again:
ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
root->root_key.objectid,
ino, orig_offset, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
if (split == start) {
key.offset = start;
@@ -990,7 +998,7 @@ again:
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
other_start = 0;
other_end = start;
@@ -1007,7 +1015,7 @@ again:
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
if (del_nr == 0) {
fi = btrfs_item_ptr(leaf, path->slots[0],
@@ -1025,7 +1033,10 @@ again:
btrfs_mark_buffer_dirty(leaf);
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
- BUG_ON(ret);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
}
out:
btrfs_free_path(path);
@@ -1105,8 +1116,7 @@ again:
if (start_pos < inode->i_size) {
struct btrfs_ordered_extent *ordered;
lock_extent_bits(&BTRFS_I(inode)->io_tree,
- start_pos, last_pos - 1, 0, &cached_state,
- GFP_NOFS);
+ start_pos, last_pos - 1, 0, &cached_state);
ordered = btrfs_lookup_first_ordered_extent(inode,
last_pos - 1);
if (ordered &&
@@ -1638,7 +1648,7 @@ static long btrfs_fallocate(struct file *file, int mode,
* transaction
*/
lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
- locked_end, 0, &cached_state, GFP_NOFS);
+ locked_end, 0, &cached_state);
ordered = btrfs_lookup_first_ordered_extent(inode,
alloc_end - 1);
if (ordered &&
@@ -1667,7 +1677,13 @@ static long btrfs_fallocate(struct file *file, int mode,
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
alloc_end - cur_offset, 0);
- BUG_ON(IS_ERR_OR_NULL(em));
+ if (IS_ERR_OR_NULL(em)) {
+ if (!em)
+ ret = -ENOMEM;
+ else
+ ret = PTR_ERR(em);
+ break;
+ }
last_byte = min(extent_map_end(em), alloc_end);
actual_end = min_t(u64, extent_map_end(em), offset + len);
last_byte = (last_byte + mask) & ~mask;
@@ -1737,7 +1753,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
return -ENXIO;
lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
- &cached_state, GFP_NOFS);
+ &cached_state);
/*
* Delalloc is such a pain. If we have a hole and we have pending
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index b02e379b14c7..e88330d3df52 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -230,11 +230,13 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
if (ret) {
trans->block_rsv = rsv;
- WARN_ON(1);
+ btrfs_abort_transaction(trans, root, ret);
return ret;
}
ret = btrfs_update_inode(trans, root, inode);
+ if (ret)
+ btrfs_abort_transaction(trans, root, ret);
trans->block_rsv = rsv;
return ret;
@@ -869,7 +871,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
io_ctl_prepare_pages(&io_ctl, inode, 0);
lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
- 0, &cached_state, GFP_NOFS);
+ 0, &cached_state);
node = rb_first(&ctl->free_space_offset);
if (!node && cluster) {
@@ -1948,14 +1950,14 @@ again:
*/
ret = btrfs_add_free_space(block_group, old_start,
offset - old_start);
- WARN_ON(ret);
+ WARN_ON(ret); /* -ENOMEM */
goto out;
}
ret = remove_from_bitmap(ctl, info, &offset, &bytes);
if (ret == -EAGAIN)
goto again;
- BUG_ON(ret);
+ BUG_ON(ret); /* logic error */
out_lock:
spin_unlock(&ctl->tree_lock);
out:
@@ -2346,7 +2348,7 @@ again:
rb_erase(&entry->offset_index, &ctl->free_space_offset);
ret = tree_insert_offset(&cluster->root, entry->offset,
&entry->offset_index, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -EEXIST; Logic error */
trace_btrfs_setup_cluster(block_group, cluster,
total_found * block_group->sectorsize, 1);
@@ -2439,7 +2441,7 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
ret = tree_insert_offset(&cluster->root, entry->offset,
&entry->offset_index, 0);
total_size += entry->bytes;
- BUG_ON(ret);
+ BUG_ON(ret); /* -EEXIST; Logic error */
} while (node && entry != last);
cluster->max_size = max_extent;
@@ -2830,6 +2832,7 @@ u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
int ret;
ret = search_bitmap(ctl, entry, &offset, &count);
+ /* Logic error; Should be empty if it can't find anything */
BUG_ON(ret);
ino = offset;
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index baa74f3db691..a13cf1a96c73 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -19,6 +19,7 @@
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
+#include "print-tree.h"
static int find_name_in_backref(struct btrfs_path *path, const char *name,
int name_len, struct btrfs_inode_ref **ref_ret)
@@ -128,13 +129,14 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_size - (ptr + sub_item_len - item_start));
- ret = btrfs_truncate_item(trans, root, path,
+ btrfs_truncate_item(trans, root, path,
item_size - sub_item_len, 1);
out:
btrfs_free_path(path);
return ret;
}
+/* Will return 0, -ENOMEM, -EMLINK, -EEXIST, or anything from the CoW path */
int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
@@ -165,7 +167,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
goto out;
old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
- ret = btrfs_extend_item(trans, root, path, ins_len);
+ btrfs_extend_item(trans, root, path, ins_len);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index ee15d88b33d2..b1a1c929ba80 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -178,7 +178,7 @@ static void start_caching(struct btrfs_root *root)
tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
root->root_key.objectid);
- BUG_ON(IS_ERR(tsk));
+ BUG_ON(IS_ERR(tsk)); /* -ENOMEM */
}
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
@@ -271,7 +271,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
break;
info = rb_entry(n, struct btrfs_free_space, offset_index);
- BUG_ON(info->bitmap);
+ BUG_ON(info->bitmap); /* Logic error */
if (info->offset > root->cache_progress)
goto free;
@@ -439,17 +439,16 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
if (ret)
goto out;
trace_btrfs_space_reservation(root->fs_info, "ino_cache",
- (u64)(unsigned long)trans,
- trans->bytes_reserved, 1);
+ trans->transid, trans->bytes_reserved, 1);
again:
inode = lookup_free_ino_inode(root, path);
- if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+ if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
ret = PTR_ERR(inode);
goto out_release;
}
if (IS_ERR(inode)) {
- BUG_ON(retry);
+ BUG_ON(retry); /* Logic error */
retry = true;
ret = create_free_ino_inode(root, trans, path);
@@ -460,12 +459,17 @@ again:
BTRFS_I(inode)->generation = 0;
ret = btrfs_update_inode(trans, root, inode);
- WARN_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_put;
+ }
if (i_size_read(inode) > 0) {
ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
goto out_put;
+ }
}
spin_lock(&root->cache_lock);
@@ -502,8 +506,7 @@ out_put:
iput(inode);
out_release:
trace_btrfs_space_reservation(root->fs_info, "ino_cache",
- (u64)(unsigned long)trans,
- trans->bytes_reserved, 0);
+ trans->transid, trans->bytes_reserved, 0);
btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
out:
trans->block_rsv = rsv;
@@ -532,7 +535,7 @@ static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
if (ret < 0)
goto error;
- BUG_ON(ret == 0);
+ BUG_ON(ret == 0); /* Corruption */
if (path->slots[0] > 0) {
slot = path->slots[0] - 1;
l = path->nodes[0];
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3a0b5c1f9d31..115bc05e42b0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -150,7 +150,6 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
inode_add_bytes(inode, size);
ret = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
- BUG_ON(ret);
if (ret) {
err = ret;
goto fail;
@@ -206,9 +205,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
* could end up racing with unlink.
*/
BTRFS_I(inode)->disk_i_size = inode->i_size;
- btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, inode);
- return 0;
+ return ret;
fail:
btrfs_free_path(path);
return err;
@@ -250,14 +249,18 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
ret = btrfs_drop_extents(trans, inode, start, aligned_end,
&hint_byte, 1);
- BUG_ON(ret);
+ if (ret)
+ return ret;
if (isize > actual_end)
inline_len = min_t(u64, isize, actual_end);
ret = insert_inline_extent(trans, root, inode, start,
inline_len, compressed_size,
compress_type, compressed_pages);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ return ret;
+ }
btrfs_delalloc_release_metadata(inode, end + 1 - start);
btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
return 0;
@@ -293,7 +296,7 @@ static noinline int add_async_extent(struct async_cow *cow,
struct async_extent *async_extent;
async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
- BUG_ON(!async_extent);
+ BUG_ON(!async_extent); /* -ENOMEM */
async_extent->start = start;
async_extent->ram_size = ram_size;
async_extent->compressed_size = compressed_size;
@@ -344,8 +347,9 @@ static noinline int compress_file_range(struct inode *inode,
int will_compress;
int compress_type = root->fs_info->compress_type;
- /* if this is a small write inside eof, kick off a defragbot */
- if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024)
+ /* if this is a small write inside eof, kick off a defrag */
+ if ((end - start + 1) < 16 * 1024 &&
+ (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
btrfs_add_inode_defrag(NULL, inode);
actual_end = min_t(u64, isize, end + 1);
@@ -433,7 +437,11 @@ again:
cont:
if (start == 0) {
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto cleanup_and_out;
+ }
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
/* lets try to make an inline extent */
@@ -450,11 +458,11 @@ cont:
total_compressed,
compress_type, pages);
}
- if (ret == 0) {
+ if (ret <= 0) {
/*
- * inline extent creation worked, we don't need
- * to create any more async work items. Unlock
- * and free up our temp pages.
+ * inline extent creation worked or returned error,
+ * we don't need to create any more async work items.
+ * Unlock and free up our temp pages.
*/
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
@@ -547,7 +555,7 @@ cleanup_and_bail_uncompressed:
}
out:
- return 0;
+ return ret;
free_pages_out:
for (i = 0; i < nr_pages_ret; i++) {
@@ -557,6 +565,20 @@ free_pages_out:
kfree(pages);
goto out;
+
+cleanup_and_out:
+ extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
+ start, end, NULL,
+ EXTENT_CLEAR_UNLOCK_PAGE |
+ EXTENT_CLEAR_DIRTY |
+ EXTENT_CLEAR_DELALLOC |
+ EXTENT_SET_WRITEBACK |
+ EXTENT_END_WRITEBACK);
+ if (!trans || IS_ERR(trans))
+ btrfs_error(root->fs_info, ret, "Failed to join transaction");
+ else
+ btrfs_abort_transaction(trans, root, ret);
+ goto free_pages_out;
}
/*
@@ -597,7 +619,7 @@ retry:
lock_extent(io_tree, async_extent->start,
async_extent->start +
- async_extent->ram_size - 1, GFP_NOFS);
+ async_extent->ram_size - 1);
/* allocate blocks */
ret = cow_file_range(inode, async_cow->locked_page,
@@ -606,6 +628,8 @@ retry:
async_extent->ram_size - 1,
&page_started, &nr_written, 0);
+ /* JDM XXX */
+
/*
* if page_started, cow_file_range inserted an
* inline extent and took care of all the unlocking
@@ -625,18 +649,21 @@ retry:
}
lock_extent(io_tree, async_extent->start,
- async_extent->start + async_extent->ram_size - 1,
- GFP_NOFS);
+ async_extent->start + async_extent->ram_size - 1);
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
- trans->block_rsv = &root->fs_info->delalloc_block_rsv;
- ret = btrfs_reserve_extent(trans, root,
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ } else {
+ trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+ ret = btrfs_reserve_extent(trans, root,
async_extent->compressed_size,
async_extent->compressed_size,
- 0, alloc_hint,
- (u64)-1, &ins, 1);
- btrfs_end_transaction(trans, root);
+ 0, alloc_hint, &ins, 1);
+ if (ret)
+ btrfs_abort_transaction(trans, root, ret);
+ btrfs_end_transaction(trans, root);
+ }
if (ret) {
int i;
@@ -649,8 +676,10 @@ retry:
async_extent->pages = NULL;
unlock_extent(io_tree, async_extent->start,
async_extent->start +
- async_extent->ram_size - 1, GFP_NOFS);
- goto retry;
+ async_extent->ram_size - 1);
+ if (ret == -ENOSPC)
+ goto retry;
+ goto out_free; /* JDM: Requeue? */
}
/*
@@ -662,7 +691,7 @@ retry:
async_extent->ram_size - 1, 0);
em = alloc_extent_map();
- BUG_ON(!em);
+ BUG_ON(!em); /* -ENOMEM */
em->start = async_extent->start;
em->len = async_extent->ram_size;
em->orig_start = em->start;
@@ -694,7 +723,7 @@ retry:
ins.offset,
BTRFS_ORDERED_COMPRESSED,
async_extent->compress_type);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
/*
* clear dirty, set writeback and unlock the pages.
@@ -716,13 +745,17 @@ retry:
ins.offset, async_extent->pages,
async_extent->nr_pages);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
alloc_hint = ins.objectid + ins.offset;
kfree(async_extent);
cond_resched();
}
-
- return 0;
+ ret = 0;
+out:
+ return ret;
+out_free:
+ kfree(async_extent);
+ goto out;
}
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
@@ -791,7 +824,18 @@ static noinline int cow_file_range(struct inode *inode,
BUG_ON(btrfs_is_free_space_inode(root, inode));
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ extent_clear_unlock_delalloc(inode,
+ &BTRFS_I(inode)->io_tree,
+ start, end, NULL,
+ EXTENT_CLEAR_UNLOCK_PAGE |
+ EXTENT_CLEAR_UNLOCK |
+ EXTENT_CLEAR_DELALLOC |
+ EXTENT_CLEAR_DIRTY |
+ EXTENT_SET_WRITEBACK |
+ EXTENT_END_WRITEBACK);
+ return PTR_ERR(trans);
+ }
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
num_bytes = (end - start + blocksize) & ~(blocksize - 1);
@@ -800,7 +844,8 @@ static noinline int cow_file_range(struct inode *inode,
ret = 0;
/* if this is a small write inside eof, kick off defrag */
- if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024)
+ if (num_bytes < 64 * 1024 &&
+ (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
btrfs_add_inode_defrag(trans, inode);
if (start == 0) {
@@ -821,8 +866,10 @@ static noinline int cow_file_range(struct inode *inode,
*nr_written = *nr_written +
(end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
*page_started = 1;
- ret = 0;
goto out;
+ } else if (ret < 0) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_unlock;
}
}
@@ -838,11 +885,14 @@ static noinline int cow_file_range(struct inode *inode,
cur_alloc_size = disk_num_bytes;
ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
root->sectorsize, 0, alloc_hint,
- (u64)-1, &ins, 1);
- BUG_ON(ret);
+ &ins, 1);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_unlock;
+ }
em = alloc_extent_map();
- BUG_ON(!em);
+ BUG_ON(!em); /* -ENOMEM */
em->start = start;
em->orig_start = em->start;
ram_size = ins.offset;
@@ -868,13 +918,16 @@ static noinline int cow_file_range(struct inode *inode,
cur_alloc_size = ins.offset;
ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
ram_size, cur_alloc_size, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
if (root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_reloc_clone_csums(inode, start,
cur_alloc_size);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_unlock;
+ }
}
if (disk_num_bytes < cur_alloc_size)
@@ -899,11 +952,23 @@ static noinline int cow_file_range(struct inode *inode,
alloc_hint = ins.objectid + ins.offset;
start += cur_alloc_size;
}
-out:
ret = 0;
+out:
btrfs_end_transaction(trans, root);
return ret;
+out_unlock:
+ extent_clear_unlock_delalloc(inode,
+ &BTRFS_I(inode)->io_tree,
+ start, end, NULL,
+ EXTENT_CLEAR_UNLOCK_PAGE |
+ EXTENT_CLEAR_UNLOCK |
+ EXTENT_CLEAR_DELALLOC |
+ EXTENT_CLEAR_DIRTY |
+ EXTENT_SET_WRITEBACK |
+ EXTENT_END_WRITEBACK);
+
+ goto out;
}
/*
@@ -969,7 +1034,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1, 0, NULL, GFP_NOFS);
while (start < end) {
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
- BUG_ON(!async_cow);
+ BUG_ON(!async_cow); /* -ENOMEM */
async_cow->inode = inode;
async_cow->root = root;
async_cow->locked_page = locked_page;
@@ -1060,7 +1125,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
u64 disk_bytenr;
u64 num_bytes;
int extent_type;
- int ret;
+ int ret, err;
int type;
int nocow;
int check_prev = 1;
@@ -1078,7 +1143,11 @@ static noinline int run_delalloc_nocow(struct inode *inode,
else
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ btrfs_free_path(path);
+ return PTR_ERR(trans);
+ }
+
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
cow_start = (u64)-1;
@@ -1086,7 +1155,10 @@ static noinline int run_delalloc_nocow(struct inode *inode,
while (1) {
ret = btrfs_lookup_file_extent(trans, root, path, ino,
cur_offset, 0);
- BUG_ON(ret < 0);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto error;
+ }
if (ret > 0 && path->slots[0] > 0 && check_prev) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key,
@@ -1100,8 +1172,10 @@ next_slot:
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- BUG_ON(1);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto error;
+ }
if (ret > 0)
break;
leaf = path->nodes[0];
@@ -1189,7 +1263,10 @@ out_check:
ret = cow_file_range(inode, locked_page, cow_start,
found_key.offset - 1, page_started,
nr_written, 1);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto error;
+ }
cow_start = (u64)-1;
}
@@ -1198,7 +1275,7 @@ out_check:
struct extent_map_tree *em_tree;
em_tree = &BTRFS_I(inode)->extent_tree;
em = alloc_extent_map();
- BUG_ON(!em);
+ BUG_ON(!em); /* -ENOMEM */
em->start = cur_offset;
em->orig_start = em->start;
em->len = num_bytes;
@@ -1224,13 +1301,16 @@ out_check:
ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
num_bytes, num_bytes, type);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
if (root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_reloc_clone_csums(inode, cur_offset,
num_bytes);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto error;
+ }
}
extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
@@ -1249,18 +1329,23 @@ out_check:
if (cow_start != (u64)-1) {
ret = cow_file_range(inode, locked_page, cow_start, end,
page_started, nr_written, 1);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto error;
+ }
}
+error:
if (nolock) {
- ret = btrfs_end_transaction_nolock(trans, root);
- BUG_ON(ret);
+ err = btrfs_end_transaction_nolock(trans, root);
} else {
- ret = btrfs_end_transaction(trans, root);
- BUG_ON(ret);
+ err = btrfs_end_transaction(trans, root);
}
+ if (!ret)
+ ret = err;
+
btrfs_free_path(path);
- return 0;
+ return ret;
}
/*
@@ -1425,10 +1510,11 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
map_length = length;
ret = btrfs_map_block(map_tree, READ, logical,
&map_length, NULL, 0);
-
+ /* Will always return 0 or 1 with map_multi == NULL */
+ BUG_ON(ret < 0);
if (map_length < length + size)
return 1;
- return ret;
+ return 0;
}
/*
@@ -1448,7 +1534,7 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw,
int ret = 0;
ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
return 0;
}
@@ -1479,14 +1565,16 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
int skip_sum;
+ int metadata = 0;
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
if (btrfs_is_free_space_inode(root, inode))
- ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
- else
- ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
- BUG_ON(ret);
+ metadata = 2;
+
+ ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
+ if (ret)
+ return ret;
if (!(rw & REQ_WRITE)) {
if (bio_flags & EXTENT_BIO_COMPRESSED) {
@@ -1571,7 +1659,7 @@ again:
page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
- &cached_state, GFP_NOFS);
+ &cached_state);
/* already ordered? We're done */
if (PagePrivate2(page))
@@ -1675,13 +1763,15 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
*/
ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
&hint, 0);
- BUG_ON(ret);
+ if (ret)
+ goto out;
ins.objectid = btrfs_ino(inode);
ins.offset = file_pos;
ins.type = BTRFS_EXTENT_DATA_KEY;
ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
- BUG_ON(ret);
+ if (ret)
+ goto out;
leaf = path->nodes[0];
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
@@ -1709,10 +1799,10 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
ret = btrfs_alloc_reserved_file_extent(trans, root,
root->root_key.objectid,
btrfs_ino(inode), file_pos, &ins);
- BUG_ON(ret);
+out:
btrfs_free_path(path);
- return 0;
+ return ret;
}
/*
@@ -1740,35 +1830,41 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
end - start + 1);
if (!ret)
return 0;
- BUG_ON(!ordered_extent);
+ BUG_ON(!ordered_extent); /* Logic error */
nolock = btrfs_is_free_space_inode(root, inode);
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
- BUG_ON(!list_empty(&ordered_extent->list));
+ BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
if (!ret) {
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
- BUG_ON(ret);
+ if (ret) /* -ENOMEM or corruption */
+ btrfs_abort_transaction(trans, root, ret);
}
goto out;
}
lock_extent_bits(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
- 0, &cached_state, GFP_NOFS);
+ 0, &cached_state);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out_unlock;
+ }
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
@@ -1779,7 +1875,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len);
- BUG_ON(ret);
} else {
BUG_ON(root == root->fs_info->tree_root);
ret = insert_reserved_file_extent(trans, inode,
@@ -1793,11 +1888,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
ordered_extent->file_offset,
ordered_extent->len);
- BUG_ON(ret);
}
unlock_extent_cached(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len - 1, &cached_state, GFP_NOFS);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
@@ -1805,7 +1903,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
ret = btrfs_update_inode_fallback(trans, root, inode);
- BUG_ON(ret);
+ if (ret) { /* -ENOMEM or corruption */
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
}
ret = 0;
out:
@@ -1824,6 +1925,11 @@ out:
btrfs_put_ordered_extent(ordered_extent);
return 0;
+out_unlock:
+ unlock_extent_cached(io_tree, ordered_extent->file_offset,
+ ordered_extent->file_offset +
+ ordered_extent->len - 1, &cached_state, GFP_NOFS);
+ goto out;
}
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
@@ -1905,6 +2011,8 @@ struct delayed_iput {
struct inode *inode;
};
+/* JDM: If this is fs-wide, why can't we add a pointer to
+ * btrfs_inode instead and avoid the allocation? */
void btrfs_add_delayed_iput(struct inode *inode)
{
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
@@ -2051,20 +2159,27 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
/* grab metadata reservation from transaction handle */
if (reserve) {
ret = btrfs_orphan_reserve_metadata(trans, inode);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
}
/* insert an orphan item to track this unlinked/truncated file */
if (insert >= 1) {
ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
- BUG_ON(ret && ret != -EEXIST);
+ if (ret && ret != -EEXIST) {
+ btrfs_abort_transaction(trans, root, ret);
+ return ret;
+ }
+ ret = 0;
}
/* insert an orphan item to track subvolume contains orphan files */
if (insert >= 2) {
ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
root->root_key.objectid);
- BUG_ON(ret);
+ if (ret && ret != -EEXIST) {
+ btrfs_abort_transaction(trans, root, ret);
+ return ret;
+ }
}
return 0;
}
@@ -2094,7 +2209,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
if (trans && delete_item) {
ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
}
if (release_rsv)
@@ -2228,7 +2343,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
}
ret = btrfs_del_orphan_item(trans, root,
found_key.objectid);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
btrfs_end_transaction(trans, root);
continue;
}
@@ -2610,16 +2725,22 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
"inode %llu parent %llu\n", name_len, name,
(unsigned long long)ino, (unsigned long long)dir_ino);
+ btrfs_abort_transaction(trans, root, ret);
goto err;
}
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
goto err;
+ }
ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
inode, dir_ino);
- BUG_ON(ret != 0 && ret != -ENOENT);
+ if (ret != 0 && ret != -ENOENT) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto err;
+ }
ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
dir, index);
@@ -2777,7 +2898,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
err = ret;
goto out;
}
- BUG_ON(ret == 0);
+ BUG_ON(ret == 0); /* Corruption */
if (check_path_shared(root, path))
goto out;
btrfs_release_path(path);
@@ -2810,7 +2931,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
err = PTR_ERR(ref);
goto out;
}
- BUG_ON(!ref);
+ BUG_ON(!ref); /* Logic error */
if (check_path_shared(root, path))
goto out;
index = btrfs_inode_ref_index(path->nodes[0], ref);
@@ -2917,23 +3038,42 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
- BUG_ON(IS_ERR_OR_NULL(di));
+ if (IS_ERR_OR_NULL(di)) {
+ if (!di)
+ ret = -ENOENT;
+ else
+ ret = PTR_ERR(di);
+ goto out;
+ }
leaf = path->nodes[0];
btrfs_dir_item_key_to_cpu(leaf, di, &key);
WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
btrfs_release_path(path);
ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
objectid, root->root_key.objectid,
dir_ino, &index, name, name_len);
if (ret < 0) {
- BUG_ON(ret != -ENOENT);
+ if (ret != -ENOENT) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
di = btrfs_search_dir_index_item(root, path, dir_ino,
name, name_len);
- BUG_ON(IS_ERR_OR_NULL(di));
+ if (IS_ERR_OR_NULL(di)) {
+ if (!di)
+ ret = -ENOENT;
+ else
+ ret = PTR_ERR(di);
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
@@ -2943,15 +3083,19 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, dir);
- BUG_ON(ret);
-
+ if (ret)
+ btrfs_abort_transaction(trans, root, ret);
+out:
btrfs_free_path(path);
- return 0;
+ return ret;
}
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
@@ -3161,8 +3305,8 @@ search_again:
}
size =
btrfs_file_extent_calc_inline_size(size);
- ret = btrfs_truncate_item(trans, root, path,
- size, 1);
+ btrfs_truncate_item(trans, root, path,
+ size, 1);
} else if (root->ref_cows) {
inode_sub_bytes(inode, item_end + 1 -
found_key.offset);
@@ -3210,7 +3354,11 @@ delete:
ret = btrfs_del_items(trans, root, path,
pending_del_slot,
pending_del_nr);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans,
+ root, ret);
+ goto error;
+ }
pending_del_nr = 0;
}
btrfs_release_path(path);
@@ -3223,8 +3371,10 @@ out:
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path, pending_del_slot,
pending_del_nr);
- BUG_ON(ret);
+ if (ret)
+ btrfs_abort_transaction(trans, root, ret);
}
+error:
btrfs_free_path(path);
return err;
}
@@ -3282,8 +3432,7 @@ again:
}
wait_on_page_writeback(page);
- lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
- GFP_NOFS);
+ lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
set_page_extent_mapped(page);
ordered = btrfs_lookup_ordered_extent(inode, page_start);
@@ -3359,7 +3508,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
btrfs_wait_ordered_range(inode, hole_start,
block_end - hole_start);
lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
- &cached_state, GFP_NOFS);
+ &cached_state);
ordered = btrfs_lookup_ordered_extent(inode, hole_start);
if (!ordered)
break;
@@ -3372,7 +3521,10 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
while (1) {
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
block_end - cur_offset, 0);
- BUG_ON(IS_ERR_OR_NULL(em));
+ if (IS_ERR(em)) {
+ err = PTR_ERR(em);
+ break;
+ }
last_byte = min(extent_map_end(em), block_end);
last_byte = (last_byte + mask) & ~mask;
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3389,7 +3541,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
cur_offset + hole_size,
&hint_byte, 1);
if (err) {
- btrfs_update_inode(trans, root, inode);
+ btrfs_abort_transaction(trans, root, err);
btrfs_end_transaction(trans, root);
break;
}
@@ -3399,7 +3551,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
0, hole_size, 0, hole_size,
0, 0, 0);
if (err) {
- btrfs_update_inode(trans, root, inode);
+ btrfs_abort_transaction(trans, root, err);
btrfs_end_transaction(trans, root);
break;
}
@@ -3779,7 +3931,7 @@ static void inode_tree_del(struct inode *inode)
}
}
-int btrfs_invalidate_inodes(struct btrfs_root *root)
+void btrfs_invalidate_inodes(struct btrfs_root *root)
{
struct rb_node *node;
struct rb_node *prev;
@@ -3839,7 +3991,6 @@ again:
node = rb_next(node);
}
spin_unlock(&root->inode_lock);
- return 0;
}
static int btrfs_init_locked_inode(struct inode *inode, void *p)
@@ -4581,18 +4732,26 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
parent_ino, index);
}
- if (ret == 0) {
- ret = btrfs_insert_dir_item(trans, root, name, name_len,
- parent_inode, &key,
- btrfs_inode_type(inode), index);
- if (ret)
- goto fail_dir_item;
+ /* Nothing to clean up yet */
+ if (ret)
+ return ret;
- btrfs_i_size_write(parent_inode, parent_inode->i_size +
- name_len * 2);
- parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
- ret = btrfs_update_inode(trans, root, parent_inode);
+ ret = btrfs_insert_dir_item(trans, root, name, name_len,
+ parent_inode, &key,
+ btrfs_inode_type(inode), index);
+ if (ret == -EEXIST)
+ goto fail_dir_item;
+ else if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ return ret;
}
+
+ btrfs_i_size_write(parent_inode, parent_inode->i_size +
+ name_len * 2);
+ parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
+ ret = btrfs_update_inode(trans, root, parent_inode);
+ if (ret)
+ btrfs_abort_transaction(trans, root, ret);
return ret;
fail_dir_item:
@@ -4806,7 +4965,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
} else {
struct dentry *parent = dentry->d_parent;
err = btrfs_update_inode(trans, root, inode);
- BUG_ON(err);
+ if (err)
+ goto fail;
d_instantiate(dentry, inode);
btrfs_log_new_name(trans, inode, NULL, parent);
}
@@ -5137,7 +5297,7 @@ again:
ret = uncompress_inline(path, inode, page,
pg_offset,
extent_offset, item);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
} else {
map = kmap(page);
read_extent_buffer(leaf, map + pg_offset, ptr,
@@ -5252,6 +5412,7 @@ out:
free_extent_map(em);
return ERR_PTR(err);
}
+ BUG_ON(!em); /* Error is always set */
return em;
}
@@ -5414,7 +5575,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
alloc_hint = get_extent_allocation_hint(inode, start, len);
ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
- alloc_hint, (u64)-1, &ins, 1);
+ alloc_hint, &ins, 1);
if (ret) {
em = ERR_PTR(ret);
goto out;
@@ -5602,7 +5763,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
free_extent_map(em);
/* DIO will do one hole at a time, so just unlock a sector */
unlock_extent(&BTRFS_I(inode)->io_tree, start,
- start + root->sectorsize - 1, GFP_NOFS);
+ start + root->sectorsize - 1);
return 0;
}
@@ -5743,7 +5904,7 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
} while (bvec <= bvec_end);
unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
- dip->logical_offset + dip->bytes - 1, GFP_NOFS);
+ dip->logical_offset + dip->bytes - 1);
bio->bi_private = dip->private;
kfree(dip->csums);
@@ -5794,7 +5955,7 @@ again:
lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
ordered->file_offset + ordered->len - 1, 0,
- &cached_state, GFP_NOFS);
+ &cached_state);
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
ret = btrfs_mark_extent_written(trans, inode,
@@ -5868,7 +6029,7 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
return 0;
}
@@ -6209,7 +6370,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
while (1) {
lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- 0, &cached_state, GFP_NOFS);
+ 0, &cached_state);
/*
* We're concerned with the entire range that we're going to be
* doing DIO to, so we need to make sure there's no ordered
@@ -6233,7 +6394,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
if (writing) {
write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- EXTENT_DELALLOC, 0, NULL, &cached_state,
+ EXTENT_DELALLOC, NULL, &cached_state,
GFP_NOFS);
if (ret) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
@@ -6363,8 +6524,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
btrfs_releasepage(page, GFP_NOFS);
return;
}
- lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
- GFP_NOFS);
+ lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
ordered = btrfs_lookup_ordered_extent(page->mapping->host,
page_offset(page));
if (ordered) {
@@ -6386,8 +6546,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
}
btrfs_put_ordered_extent(ordered);
cached_state = NULL;
- lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
- GFP_NOFS);
+ lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
}
clear_extent_bit(tree, page_start, page_end,
EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -6462,8 +6621,7 @@ again:
}
wait_on_page_writeback(page);
- lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
- GFP_NOFS);
+ lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
set_page_extent_mapped(page);
/*
@@ -6737,10 +6895,9 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
btrfs_i_size_write(inode, 0);
err = btrfs_update_inode(trans, new_root, inode);
- BUG_ON(err);
iput(inode);
- return 0;
+ return err;
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
@@ -6783,6 +6940,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
extent_map_tree_init(&ei->extent_tree);
extent_io_tree_init(&ei->io_tree, &inode->i_data);
extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
+ ei->io_tree.track_uptodate = 1;
+ ei->io_failure_tree.track_uptodate = 1;
mutex_init(&ei->log_mutex);
mutex_init(&ei->delalloc_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
@@ -7072,7 +7231,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!ret)
ret = btrfs_update_inode(trans, root, old_inode);
}
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_fail;
+ }
if (new_inode) {
new_inode->i_ctime = CURRENT_TIME;
@@ -7090,11 +7252,14 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_dentry->d_name.name,
new_dentry->d_name.len);
}
- BUG_ON(ret);
- if (new_inode->i_nlink == 0) {
+ if (!ret && new_inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, new_dentry->d_inode);
BUG_ON(ret);
}
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_fail;
+ }
}
fixup_inode_flags(new_dir, old_inode);
@@ -7102,7 +7267,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
ret = btrfs_add_link(trans, new_dir, old_inode,
new_dentry->d_name.name,
new_dentry->d_name.len, 0, index);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_fail;
+ }
if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
struct dentry *parent = new_dentry->d_parent;
@@ -7315,7 +7483,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
}
ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
- 0, *alloc_hint, (u64)-1, &ins, 1);
+ 0, *alloc_hint, &ins, 1);
if (ret) {
if (own_trans)
btrfs_end_transaction(trans, root);
@@ -7327,7 +7495,12 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
ins.offset, ins.offset,
ins.offset, 0, 0, 0,
BTRFS_FILE_EXTENT_PREALLOC);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ if (own_trans)
+ btrfs_end_transaction(trans, root);
+ break;
+ }
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + ins.offset -1, 0);
@@ -7349,7 +7522,13 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
}
ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
+
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ if (own_trans)
+ btrfs_end_transaction(trans, root);
+ break;
+ }
if (own_trans)
btrfs_end_transaction(trans, root);
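
The inode.c hunks above all apply the same conversion: where an operation inside a running transaction fails (-ENOMEM, -ENOSPC, or tree corruption), the old BUG_ON(ret) becomes an explicit abort of the transaction followed by normal error propagation. A minimal sketch of the resulting shape, illustrative only and not taken from the patch (example_step() is a made-up name; btrfs_update_inode() and btrfs_abort_transaction() are the real helpers used above):

static int example_step(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root, struct inode *inode)
{
        int ret;

        ret = btrfs_update_inode(trans, root, inode);   /* may fail, e.g. -ENOMEM */
        if (ret) {
                /* flag the whole transaction as failed ... */
                btrfs_abort_transaction(trans, root, ret);
                /* ... and let the caller end the transaction and unwind */
                return ret;
        }
        return 0;
}
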
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d8b54715c2de..18cc23d164a8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -425,22 +425,37 @@ static noinline int create_subvol(struct btrfs_root *root,
key.offset = (u64)-1;
new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
- BUG_ON(IS_ERR(new_root));
+ if (IS_ERR(new_root)) {
+ btrfs_abort_transaction(trans, root, PTR_ERR(new_root));
+ ret = PTR_ERR(new_root);
+ goto fail;
+ }
btrfs_record_root_in_trans(trans, new_root);
ret = btrfs_create_subvol_root(trans, new_root, new_dirid);
+ if (ret) {
+ /* We potentially lose an unused inode item here */
+ btrfs_abort_transaction(trans, root, ret);
+ goto fail;
+ }
+
/*
* insert the directory item
*/
ret = btrfs_set_inode_index(dir, &index);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto fail;
+ }
ret = btrfs_insert_dir_item(trans, root,
name, namelen, dir, &key,
BTRFS_FT_DIR, index);
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
goto fail;
+ }
btrfs_i_size_write(dir, dir->i_size + namelen * 2);
ret = btrfs_update_inode(trans, root, dir);
@@ -769,6 +784,31 @@ none:
return -ENOENT;
}
+/*
+ * Validity check of prev em and next em:
+ * 1) no prev/next em
+ * 2) prev/next em is a hole/inline extent
+ */
+static int check_adjacent_extents(struct inode *inode, struct extent_map *em)
+{
+ struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct extent_map *prev = NULL, *next = NULL;
+ int ret = 0;
+
+ read_lock(&em_tree->lock);
+ prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1);
+ next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1);
+ read_unlock(&em_tree->lock);
+
+ if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) &&
+ (!next || next->block_start >= EXTENT_MAP_LAST_BYTE))
+ ret = 1;
+ free_extent_map(prev);
+ free_extent_map(next);
+
+ return ret;
+}
+
static int should_defrag_range(struct inode *inode, u64 start, u64 len,
int thresh, u64 *last_len, u64 *skip,
u64 *defrag_end)
@@ -797,17 +837,25 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
if (!em) {
/* get the big lock and read metadata off disk */
- lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
+ lock_extent(io_tree, start, start + len - 1);
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
- unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);
+ unlock_extent(io_tree, start, start + len - 1);
if (IS_ERR(em))
return 0;
}
/* this will cover holes, and inline extents */
- if (em->block_start >= EXTENT_MAP_LAST_BYTE)
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
ret = 0;
+ goto out;
+ }
+
+ /* If there is nothing adjacent to merge with, just skip. */
+ if (check_adjacent_extents(inode, em)) {
+ ret = 0;
+ goto out;
+ }
/*
* we hit a real extent, if it is big don't bother defragging it again
@@ -815,6 +863,7 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
ret = 0;
+out:
/*
* last_len ends up being a counter of how many bytes we've defragged.
* every time we choose not to defrag an extent, we reset *last_len
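
For context, and purely as an illustration rather than patch content: with check_adjacent_extents() in place, should_defrag_range() now skips an extent whose neighbours on both sides are holes or inline extents, because defragmenting it could not merge it with anything. A hypothetical layout:

/*
 * extent map around E:
 *
 *   [ hole ][ real extent E ][ hole ]
 *
 * Both neighbour lookups return mappings with
 * block_start >= EXTENT_MAP_LAST_BYTE, so check_adjacent_extents()
 * reports "nothing to merge with" and E is left alone.  If either
 * neighbour were a regular extent, defragging E could merge the two,
 * so the range would still be considered.
 */
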
@@ -856,6 +905,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
u64 isize = i_size_read(inode);
u64 page_start;
u64 page_end;
+ u64 page_cnt;
int ret;
int i;
int i_done;
@@ -864,19 +914,21 @@ static int cluster_pages_for_defrag(struct inode *inode,
struct extent_io_tree *tree;
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
- if (isize == 0)
- return 0;
file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+ if (!isize || start_index > file_end)
+ return 0;
+
+ page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
ret = btrfs_delalloc_reserve_space(inode,
- num_pages << PAGE_CACHE_SHIFT);
+ page_cnt << PAGE_CACHE_SHIFT);
if (ret)
return ret;
i_done = 0;
tree = &BTRFS_I(inode)->io_tree;
/* step one, lock all the pages */
- for (i = 0; i < num_pages; i++) {
+ for (i = 0; i < page_cnt; i++) {
struct page *page;
again:
page = find_or_create_page(inode->i_mapping,
@@ -887,10 +939,10 @@ again:
page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
while (1) {
- lock_extent(tree, page_start, page_end, GFP_NOFS);
+ lock_extent(tree, page_start, page_end);
ordered = btrfs_lookup_ordered_extent(inode,
page_start);
- unlock_extent(tree, page_start, page_end, GFP_NOFS);
+ unlock_extent(tree, page_start, page_end);
if (!ordered)
break;
@@ -898,6 +950,15 @@ again:
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
lock_page(page);
+ /*
+ * we unlocked the page above, so we need to check
+ * whether it was released in the meantime.
+ */
+ if (page->mapping != inode->i_mapping) {
+ unlock_page(page);
+ page_cache_release(page);
+ goto again;
+ }
}
if (!PageUptodate(page)) {
@@ -911,15 +972,6 @@ again:
}
}
- isize = i_size_read(inode);
- file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
- if (!isize || page->index > file_end) {
- /* whoops, we blew past eof, skip this page */
- unlock_page(page);
- page_cache_release(page);
- break;
- }
-
if (page->mapping != inode->i_mapping) {
unlock_page(page);
page_cache_release(page);
@@ -946,19 +998,18 @@ again:
page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
lock_extent_bits(&BTRFS_I(inode)->io_tree,
- page_start, page_end - 1, 0, &cached_state,
- GFP_NOFS);
+ page_start, page_end - 1, 0, &cached_state);
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
GFP_NOFS);
- if (i_done != num_pages) {
+ if (i_done != page_cnt) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
btrfs_delalloc_release_space(inode,
- (num_pages - i_done) << PAGE_CACHE_SHIFT);
+ (page_cnt - i_done) << PAGE_CACHE_SHIFT);
}
@@ -983,7 +1034,7 @@ out:
unlock_page(pages[i]);
page_cache_release(pages[i]);
}
- btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT);
+ btrfs_delalloc_release_space(inode, page_cnt << PAGE_CACHE_SHIFT);
return ret;
}
@@ -1089,12 +1140,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (!(inode->i_sb->s_flags & MS_ACTIVE))
break;
- if (!newer_than &&
- !should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
- PAGE_CACHE_SIZE,
- extent_thresh,
- &last_len, &skip,
- &defrag_end)) {
+ if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
+ PAGE_CACHE_SIZE, extent_thresh,
+ &last_len, &skip, &defrag_end)) {
unsigned long next;
/*
* the should_defrag function tells us how much to skip
@@ -1123,17 +1171,24 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
ra_index += max_cluster;
}
+ mutex_lock(&inode->i_mutex);
ret = cluster_pages_for_defrag(inode, pages, i, cluster);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&inode->i_mutex);
goto out_ra;
+ }
defrag_count += ret;
balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret);
+ mutex_unlock(&inode->i_mutex);
if (newer_than) {
if (newer_off == (u64)-1)
break;
+ if (ret > 0)
+ i += ret;
+
newer_off = max(newer_off + 1,
(u64)i << PAGE_CACHE_SHIFT);
@@ -1966,7 +2021,11 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
dest->root_key.objectid,
dentry->d_name.name,
dentry->d_name.len);
- BUG_ON(ret);
+ if (ret) {
+ err = ret;
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_end_trans;
+ }
btrfs_record_root_in_trans(trans, dest);
@@ -1979,11 +2038,16 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
ret = btrfs_insert_orphan_item(trans,
root->fs_info->tree_root,
dest->root_key.objectid);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ err = ret;
+ goto out_end_trans;
+ }
}
-
+out_end_trans:
ret = btrfs_end_transaction(trans, root);
- BUG_ON(ret);
+ if (ret && !err)
+ err = ret;
inode->i_flags |= S_DEAD;
out_up_write:
up_write(&root->fs_info->subvol_sem);
@@ -2326,13 +2390,13 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
another, and lock file content */
while (1) {
struct btrfs_ordered_extent *ordered;
- lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+ lock_extent(&BTRFS_I(src)->io_tree, off, off+len);
ordered = btrfs_lookup_first_ordered_extent(src, off+len);
if (!ordered &&
!test_range_bit(&BTRFS_I(src)->io_tree, off, off+len,
EXTENT_DELALLOC, 0, NULL))
break;
- unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+ unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
if (ordered)
btrfs_put_ordered_extent(ordered);
btrfs_wait_ordered_range(src, off, len);
@@ -2447,11 +2511,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
new_key.offset,
new_key.offset + datal,
&hint_byte, 1);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root,
+ ret);
+ btrfs_end_transaction(trans, root);
+ goto out;
+ }
ret = btrfs_insert_empty_item(trans, root, path,
&new_key, size);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root,
+ ret);
+ btrfs_end_transaction(trans, root);
+ goto out;
+ }
leaf = path->nodes[0];
slot = path->slots[0];
@@ -2478,7 +2552,15 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
btrfs_ino(inode),
new_key.offset - datao,
0);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans,
+ root,
+ ret);
+ btrfs_end_transaction(trans,
+ root);
+ goto out;
+
+ }
}
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
u64 skip = 0;
@@ -2503,11 +2585,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
new_key.offset,
new_key.offset + datal,
&hint_byte, 1);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root,
+ ret);
+ btrfs_end_transaction(trans, root);
+ goto out;
+ }
ret = btrfs_insert_empty_item(trans, root, path,
&new_key, size);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root,
+ ret);
+ btrfs_end_transaction(trans, root);
+ goto out;
+ }
if (skip) {
u32 start =
@@ -2541,8 +2633,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
btrfs_i_size_write(inode, endoff);
ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
- btrfs_end_transaction(trans, root);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ btrfs_end_transaction(trans, root);
+ goto out;
+ }
+ ret = btrfs_end_transaction(trans, root);
}
next:
btrfs_release_path(path);
@@ -2551,7 +2647,7 @@ next:
ret = 0;
out:
btrfs_release_path(path);
- unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+ unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
out_unlock:
mutex_unlock(&src->i_mutex);
mutex_unlock(&inode->i_mutex);
@@ -3066,8 +3162,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
goto out;
extent_item_pos = loi->logical - key.objectid;
- ret = iterate_extent_inodes(root->fs_info, path, key.objectid,
- extent_item_pos, build_ino_list,
+ ret = iterate_extent_inodes(root->fs_info, key.objectid,
+ extent_item_pos, 0, build_ino_list,
inodes);
if (ret < 0)
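
One detail of the cluster_pages_for_defrag() changes earlier in this file is easier to see with numbers (hypothetical values, 4K pages): the delalloc reservation is now sized from the pages that can actually exist before EOF, which is why the per-page EOF check inside the loop could be dropped.

/*
 * isize = 9000 bytes  =>  file_end = (9000 - 1) >> PAGE_CACHE_SHIFT = 2,
 * i.e. valid page indexes are 0..2.
 *
 * A cluster request of num_pages = 16 at start_index = 1 is clamped to
 *
 *   page_cnt = min(16, file_end - start_index + 1) = min(16, 2) = 2
 *
 * so space for only 2 pages (page_cnt << PAGE_CACHE_SHIFT bytes) is
 * reserved and at most 2 pages are locked, instead of reserving for 16
 * and discovering the past-EOF pages one by one inside the loop.
 */
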
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 5e178d8f7167..272f911203ff 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -208,7 +208,7 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
* take a spinning write lock. This will wait for both
* blocking readers or writers
*/
-int btrfs_tree_lock(struct extent_buffer *eb)
+void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
@@ -230,13 +230,12 @@ again:
atomic_inc(&eb->spinning_writers);
atomic_inc(&eb->write_locks);
eb->lock_owner = current->pid;
- return 0;
}
/*
* drop a spinning or a blocking write lock.
*/
-int btrfs_tree_unlock(struct extent_buffer *eb)
+void btrfs_tree_unlock(struct extent_buffer *eb)
{
int blockers = atomic_read(&eb->blocking_writers);
@@ -255,7 +254,6 @@ int btrfs_tree_unlock(struct extent_buffer *eb)
atomic_dec(&eb->spinning_writers);
write_unlock(&eb->lock);
}
- return 0;
}
void btrfs_assert_tree_locked(struct extent_buffer *eb)
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 17247ddb81a0..ca52681e5f40 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -24,8 +24,8 @@
#define BTRFS_WRITE_LOCK_BLOCKING 3
#define BTRFS_READ_LOCK_BLOCKING 4
-int btrfs_tree_lock(struct extent_buffer *eb);
-int btrfs_tree_unlock(struct extent_buffer *eb);
+void btrfs_tree_lock(struct extent_buffer *eb);
+void btrfs_tree_unlock(struct extent_buffer *eb);
int btrfs_try_spin_lock(struct extent_buffer *eb);
void btrfs_tree_read_lock(struct extent_buffer *eb);
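
The locking API change above (and the matching ordered-data.c conversions that follow) applies a simple rule: a function whose only possible return value is 0 is switched to void, so its prototype no longer suggests an error that callers would have to invent handling for. The caller-side effect, sketched rather than quoted from the tree:

        /* before: a return value that was always 0 */
        ret = btrfs_tree_lock(eb);
        BUG_ON(ret);

        /* after: the signature itself documents that locking cannot fail */
        btrfs_tree_lock(eb);
        /* ... modify the extent buffer ... */
        btrfs_tree_unlock(eb);
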
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index a1c940425307..bbf6d0d9aebe 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -59,6 +59,14 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
return NULL;
}
+static void ordered_data_tree_panic(struct inode *inode, int errno,
+ u64 offset)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
+ "%llu\n", (unsigned long long)offset);
+}
+
/*
* look for a given offset in the tree, and if it can't be found return the
* first lesser offset
@@ -207,7 +215,8 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
spin_lock(&tree->lock);
node = tree_insert(&tree->tree, file_offset,
&entry->rb_node);
- BUG_ON(node);
+ if (node)
+ ordered_data_tree_panic(inode, -EEXIST, file_offset);
spin_unlock(&tree->lock);
spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
@@ -215,7 +224,6 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
&BTRFS_I(inode)->root->fs_info->ordered_extents);
spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
- BUG_ON(node);
return 0;
}
@@ -249,9 +257,9 @@ int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
* when an ordered extent is finished. If the list covers more than one
* ordered extent, it is split across multiples.
*/
-int btrfs_add_ordered_sum(struct inode *inode,
- struct btrfs_ordered_extent *entry,
- struct btrfs_ordered_sum *sum)
+void btrfs_add_ordered_sum(struct inode *inode,
+ struct btrfs_ordered_extent *entry,
+ struct btrfs_ordered_sum *sum)
{
struct btrfs_ordered_inode_tree *tree;
@@ -259,7 +267,6 @@ int btrfs_add_ordered_sum(struct inode *inode,
spin_lock(&tree->lock);
list_add_tail(&sum->list, &entry->list);
spin_unlock(&tree->lock);
- return 0;
}
/*
@@ -384,7 +391,7 @@ out:
* used to drop a reference on an ordered extent. This will free
* the extent if the last reference is dropped
*/
-int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
+void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
struct list_head *cur;
struct btrfs_ordered_sum *sum;
@@ -400,7 +407,6 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
}
kfree(entry);
}
- return 0;
}
/*
@@ -408,8 +414,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
* and you must wake_up entry->wait. You must hold the tree lock
* while you call this function.
*/
-static int __btrfs_remove_ordered_extent(struct inode *inode,
- struct btrfs_ordered_extent *entry)
+static void __btrfs_remove_ordered_extent(struct inode *inode,
+ struct btrfs_ordered_extent *entry)
{
struct btrfs_ordered_inode_tree *tree;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -436,35 +442,30 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,
list_del_init(&BTRFS_I(inode)->ordered_operations);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
-
- return 0;
}
/*
* remove an ordered extent from the tree. No references are dropped
* but any waiters are woken.
*/
-int btrfs_remove_ordered_extent(struct inode *inode,
- struct btrfs_ordered_extent *entry)
+void btrfs_remove_ordered_extent(struct inode *inode,
+ struct btrfs_ordered_extent *entry)
{
struct btrfs_ordered_inode_tree *tree;
- int ret;
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock(&tree->lock);
- ret = __btrfs_remove_ordered_extent(inode, entry);
+ __btrfs_remove_ordered_extent(inode, entry);
spin_unlock(&tree->lock);
wake_up(&entry->wait);
-
- return ret;
}
/*
* wait for all the ordered extents in a root. This is done when balancing
* space between drives.
*/
-int btrfs_wait_ordered_extents(struct btrfs_root *root,
- int nocow_only, int delay_iput)
+void btrfs_wait_ordered_extents(struct btrfs_root *root,
+ int nocow_only, int delay_iput)
{
struct list_head splice;
struct list_head *cur;
@@ -512,7 +513,6 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root,
spin_lock(&root->fs_info->ordered_extent_lock);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
- return 0;
}
/*
@@ -525,7 +525,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root,
* extra check to make sure the ordered operation list really is empty
* before we return
*/
-int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
+void btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
struct btrfs_inode *btrfs_inode;
struct inode *inode;
@@ -573,8 +573,6 @@ again:
spin_unlock(&root->fs_info->ordered_extent_lock);
mutex_unlock(&root->fs_info->ordered_operations_mutex);
-
- return 0;
}
/*
@@ -609,7 +607,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
/*
* Used to wait on ordered extents across a large range of bytes.
*/
-int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
+void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
u64 end;
u64 orig_end;
@@ -664,7 +662,6 @@ again:
schedule_timeout(1);
goto again;
}
- return 0;
}
/*
@@ -948,9 +945,8 @@ out:
* If trans is not null, we'll do a friendly check for a transaction that
* is already flushing things and force the IO down ourselves.
*/
-int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *inode)
+void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode)
{
u64 last_mod;
@@ -961,7 +957,7 @@ int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
* commit, we can safely return without doing anything
*/
if (last_mod < root->fs_info->last_trans_committed)
- return 0;
+ return;
/*
* the transaction is already committing. Just start the IO and
@@ -969,7 +965,7 @@ int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
*/
if (trans && root->fs_info->running_transaction->blocked) {
btrfs_wait_ordered_range(inode, 0, (u64)-1);
- return 0;
+ return;
}
spin_lock(&root->fs_info->ordered_extent_lock);
@@ -978,6 +974,4 @@ int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
&root->fs_info->ordered_operations);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
-
- return 0;
}
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index ff1f69aa1883..c355ad4dc1a6 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -138,8 +138,8 @@ btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
t->last = NULL;
}
-int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
-int btrfs_remove_ordered_extent(struct inode *inode,
+void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
+void btrfs_remove_ordered_extent(struct inode *inode,
struct btrfs_ordered_extent *entry);
int btrfs_dec_test_ordered_pending(struct inode *inode,
struct btrfs_ordered_extent **cached,
@@ -154,14 +154,14 @@ int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len,
int type, int compress_type);
-int btrfs_add_ordered_sum(struct inode *inode,
- struct btrfs_ordered_extent *entry,
- struct btrfs_ordered_sum *sum);
+void btrfs_add_ordered_sum(struct inode *inode,
+ struct btrfs_ordered_extent *entry,
+ struct btrfs_ordered_sum *sum);
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
u64 file_offset);
void btrfs_start_ordered_extent(struct inode *inode,
struct btrfs_ordered_extent *entry, int wait);
-int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
+void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
@@ -170,10 +170,10 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered);
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
-int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *inode);
-int btrfs_wait_ordered_extents(struct btrfs_root *root,
- int nocow_only, int delay_iput);
+void btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
+void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode);
+void btrfs_wait_ordered_extents(struct btrfs_root *root,
+ int nocow_only, int delay_iput);
#endif
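
A rough summary of how this series splits failures, sketched here for orientation rather than copied from the patch:

/*
 *   recoverable (I/O error, -ENOMEM, -ENOSPC while inside a transaction):
 *       btrfs_abort_transaction(trans, root, ret);
 *       return ret;                        the caller unwinds normally
 *
 *   impossible unless in-memory state is already corrupt
 *   (e.g. a duplicate key turning up in the ordered-extent rbtree):
 *       ordered_data_tree_panic(inode, -EEXIST, file_offset);
 *       which reports the inconsistency via btrfs_panic()
 */
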
diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c
index f8be250963a0..24cad1695af7 100644
--- a/fs/btrfs/orphan.c
+++ b/fs/btrfs/orphan.c
@@ -58,7 +58,7 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
goto out;
- if (ret) {
+ if (ret) { /* JDM: Really? */
ret = -ENOENT;
goto out;
}
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 22db04550f6a..dc5d33146fdb 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -54,7 +54,6 @@
* than the 2 started one after another.
*/
-#define MAX_MIRRORS 2
#define MAX_IN_FLIGHT 6
struct reada_extctl {
@@ -71,7 +70,7 @@ struct reada_extent {
struct list_head extctl;
struct kref refcnt;
spinlock_t lock;
- struct reada_zone *zones[MAX_MIRRORS];
+ struct reada_zone *zones[BTRFS_MAX_MIRRORS];
int nzones;
struct btrfs_device *scheduled_for;
};
@@ -84,7 +83,8 @@ struct reada_zone {
spinlock_t lock;
int locked;
struct btrfs_device *device;
- struct btrfs_device *devs[MAX_MIRRORS]; /* full list, incl self */
+ struct btrfs_device *devs[BTRFS_MAX_MIRRORS]; /* full list, incl
+ * self */
int ndevs;
struct kref refcnt;
};
@@ -365,9 +365,9 @@ again:
if (ret || !bbio || length < blocksize)
goto error;
- if (bbio->num_stripes > MAX_MIRRORS) {
+ if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
printk(KERN_ERR "btrfs readahead: more than %d copies not "
- "supported", MAX_MIRRORS);
+ "supported", BTRFS_MAX_MIRRORS);
goto error;
}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 8c1aae2c845d..017281dbb2a7 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -326,6 +326,19 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
return NULL;
}
+void backref_tree_panic(struct rb_node *rb_node, int errno,
+ u64 bytenr)
+{
+
+ struct btrfs_fs_info *fs_info = NULL;
+ struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
+ rb_node);
+ if (bnode->root)
+ fs_info = bnode->root->fs_info;
+ btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
+ "found at offset %llu\n", (unsigned long long)bytenr);
+}
+
/*
* walk up backref nodes until reach node presents tree root
*/
@@ -452,7 +465,8 @@ static void update_backref_node(struct backref_cache *cache,
rb_erase(&node->rb_node, &cache->rb_root);
node->bytenr = bytenr;
rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
- BUG_ON(rb_node);
+ if (rb_node)
+ backref_tree_panic(rb_node, -EEXIST, bytenr);
}
/*
@@ -999,7 +1013,8 @@ next:
if (!cowonly) {
rb_node = tree_insert(&cache->rb_root, node->bytenr,
&node->rb_node);
- BUG_ON(rb_node);
+ if (rb_node)
+ backref_tree_panic(rb_node, -EEXIST, node->bytenr);
list_add_tail(&node->lower, &cache->leaves);
}
@@ -1034,7 +1049,9 @@ next:
if (!cowonly) {
rb_node = tree_insert(&cache->rb_root, upper->bytenr,
&upper->rb_node);
- BUG_ON(rb_node);
+ if (rb_node)
+ backref_tree_panic(rb_node, -EEXIST,
+ upper->bytenr);
}
list_add_tail(&edge->list[UPPER], &upper->lower);
@@ -1180,7 +1197,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
&new_node->rb_node);
- BUG_ON(rb_node);
+ if (rb_node)
+ backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
if (!new_node->lowest) {
list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
@@ -1203,14 +1221,15 @@ fail:
/*
* helper to add 'address of tree root -> reloc tree' mapping
*/
-static int __add_reloc_root(struct btrfs_root *root)
+static int __must_check __add_reloc_root(struct btrfs_root *root)
{
struct rb_node *rb_node;
struct mapping_node *node;
struct reloc_control *rc = root->fs_info->reloc_ctl;
node = kmalloc(sizeof(*node), GFP_NOFS);
- BUG_ON(!node);
+ if (!node)
+ return -ENOMEM;
node->bytenr = root->node->start;
node->data = root;
@@ -1219,7 +1238,12 @@ static int __add_reloc_root(struct btrfs_root *root)
rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
- BUG_ON(rb_node);
+ if (rb_node) {
+ kfree(node);
+ btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
+ "for start=%llu while inserting into relocation "
+ "tree\n");
+ }
list_add_tail(&root->root_list, &rc->reloc_roots);
return 0;
@@ -1252,7 +1276,8 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
- BUG_ON(rb_node);
+ if (rb_node)
+ backref_tree_panic(rb_node, -EEXIST, node->bytenr);
} else {
list_del_init(&root->root_list);
kfree(node);
@@ -1334,6 +1359,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *reloc_root;
struct reloc_control *rc = root->fs_info->reloc_ctl;
int clear_rsv = 0;
+ int ret;
if (root->reloc_root) {
reloc_root = root->reloc_root;
@@ -1353,7 +1379,8 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
if (clear_rsv)
trans->block_rsv = NULL;
- __add_reloc_root(reloc_root);
+ ret = __add_reloc_root(reloc_root);
+ BUG_ON(ret < 0);
root->reloc_root = reloc_root;
return 0;
}
@@ -1577,15 +1604,14 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
WARN_ON(!IS_ALIGNED(end, root->sectorsize));
end--;
ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
- key.offset, end,
- GFP_NOFS);
+ key.offset, end);
if (!ret)
continue;
btrfs_drop_extent_cache(inode, key.offset, end,
1);
unlock_extent(&BTRFS_I(inode)->io_tree,
- key.offset, end, GFP_NOFS);
+ key.offset, end);
}
}
@@ -1956,9 +1982,9 @@ static int invalidate_extent_cache(struct btrfs_root *root,
}
/* the lock_extent waits for readpage to complete */
- lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ lock_extent(&BTRFS_I(inode)->io_tree, start, end);
btrfs_drop_extent_cache(inode, start, end, 1);
- unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
}
return 0;
}
@@ -2246,7 +2272,8 @@ again:
} else {
list_del_init(&reloc_root->root_list);
}
- btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
+ ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
+ BUG_ON(ret < 0);
}
if (found) {
@@ -2862,12 +2889,12 @@ int prealloc_file_extent_cluster(struct inode *inode,
else
end = cluster->end - offset;
- lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ lock_extent(&BTRFS_I(inode)->io_tree, start, end);
num_bytes = end + 1 - start;
ret = btrfs_prealloc_file_range(inode, 0, start,
num_bytes, num_bytes,
end + 1, &alloc_hint);
- unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
if (ret)
break;
nr++;
@@ -2899,7 +2926,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
- lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ lock_extent(&BTRFS_I(inode)->io_tree, start, end);
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
@@ -2910,7 +2937,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
}
btrfs_drop_extent_cache(inode, start, end, 0);
}
- unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
return ret;
}
@@ -2990,8 +3017,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
page_start = (u64)page->index << PAGE_CACHE_SHIFT;
page_end = page_start + PAGE_CACHE_SIZE - 1;
- lock_extent(&BTRFS_I(inode)->io_tree,
- page_start, page_end, GFP_NOFS);
+ lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
set_page_extent_mapped(page);
@@ -3007,7 +3033,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
set_page_dirty(page);
unlock_extent(&BTRFS_I(inode)->io_tree,
- page_start, page_end, GFP_NOFS);
+ page_start, page_end);
unlock_page(page);
page_cache_release(page);
@@ -3154,7 +3180,8 @@ static int add_tree_block(struct reloc_control *rc,
block->key_ready = 0;
rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
- BUG_ON(rb_node);
+ if (rb_node)
+ backref_tree_panic(rb_node, -EEXIST, block->bytenr);
return 0;
}
@@ -3426,7 +3453,9 @@ static int find_data_references(struct reloc_control *rc,
block->key_ready = 1;
rb_node = tree_insert(blocks, block->bytenr,
&block->rb_node);
- BUG_ON(rb_node);
+ if (rb_node)
+ backref_tree_panic(rb_node, -EEXIST,
+ block->bytenr);
}
if (counted)
added = 1;
@@ -4073,10 +4102,11 @@ out:
static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
struct btrfs_trans_handle *trans;
- int ret;
+ int ret, err;
trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
memset(&root->root_item.drop_progress, 0,
sizeof(root->root_item.drop_progress));
@@ -4084,11 +4114,11 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
btrfs_set_root_refs(&root->root_item, 0);
ret = btrfs_update_root(trans, root->fs_info->tree_root,
&root->root_key, &root->root_item);
- BUG_ON(ret);
- ret = btrfs_end_transaction(trans, root->fs_info->tree_root);
- BUG_ON(ret);
- return 0;
+ err = btrfs_end_transaction(trans, root->fs_info->tree_root);
+ if (err)
+ return err;
+ return ret;
}
/*
@@ -4156,7 +4186,11 @@ int btrfs_recover_relocation(struct btrfs_root *root)
err = ret;
goto out;
}
- mark_garbage_root(reloc_root);
+ ret = mark_garbage_root(reloc_root);
+ if (ret < 0) {
+ err = ret;
+ goto out;
+ }
}
}
@@ -4202,13 +4236,19 @@ int btrfs_recover_relocation(struct btrfs_root *root)
fs_root = read_fs_root(root->fs_info,
reloc_root->root_key.offset);
- BUG_ON(IS_ERR(fs_root));
+ if (IS_ERR(fs_root)) {
+ err = PTR_ERR(fs_root);
+ goto out_free;
+ }
- __add_reloc_root(reloc_root);
+ err = __add_reloc_root(reloc_root);
+ BUG_ON(err < 0); /* -ENOMEM or logic error */
fs_root->reloc_root = reloc_root;
}
- btrfs_commit_transaction(trans, rc->extent_root);
+ err = btrfs_commit_transaction(trans, rc->extent_root);
+ if (err)
+ goto out_free;
merge_reloc_roots(rc);
@@ -4218,7 +4258,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
if (IS_ERR(trans))
err = PTR_ERR(trans);
else
- btrfs_commit_transaction(trans, rc->extent_root);
+ err = btrfs_commit_transaction(trans, rc->extent_root);
out_free:
kfree(rc);
out:
@@ -4267,6 +4307,8 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
disk_bytenr + len - 1, &list, 0);
+ if (ret)
+ goto out;
while (!list_empty(&list)) {
sums = list_entry(list.next, struct btrfs_ordered_sum, list);
@@ -4284,6 +4326,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
btrfs_add_ordered_sum(inode, ordered, sums);
}
+out:
btrfs_put_ordered_extent(ordered);
return ret;
}
@@ -4380,7 +4423,7 @@ void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
* called after snapshot is created. migrate block reservation
* and create reloc root for the newly created snapshot
*/
-void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
+int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending)
{
struct btrfs_root *root = pending->root;
@@ -4390,7 +4433,7 @@ void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
int ret;
if (!root->reloc_root)
- return;
+ return 0;
rc = root->fs_info->reloc_ctl;
rc->merging_rsv_size += rc->nodes_relocated;
@@ -4399,18 +4442,21 @@ void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
ret = btrfs_block_rsv_migrate(&pending->block_rsv,
rc->block_rsv,
rc->nodes_relocated);
- BUG_ON(ret);
+ if (ret)
+ return ret;
}
new_root = pending->snap;
reloc_root = create_reloc_root(trans, root->reloc_root,
new_root->root_key.objectid);
+ if (IS_ERR(reloc_root))
+ return PTR_ERR(reloc_root);
- __add_reloc_root(reloc_root);
+ ret = __add_reloc_root(reloc_root);
+ BUG_ON(ret < 0);
new_root->reloc_root = reloc_root;
- if (rc->create_reloc_tree) {
+ if (rc->create_reloc_tree)
ret = clone_backref_node(trans, rc, root, reloc_root);
- BUG_ON(ret);
- }
+ return ret;
}
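
Since __add_reloc_root() above can now fail with -ENOMEM, the __must_check annotation makes the compiler warn about any caller that ignores the result. The callers converted in this patch still have no unwind path at that point, so they keep an explicit BUG_ON with a comment; both shapes, sketched for illustration only:

        ret = __add_reloc_root(reloc_root);
        if (ret < 0)
                return ret;             /* the shape callers can move toward */

        ret = __add_reloc_root(reloc_root);
        BUG_ON(ret < 0);                /* -ENOMEM or logic error: no unwind path yet */
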
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index f4099904565a..24fb8ce4e071 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -93,10 +93,14 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
unsigned long ptr;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
- if (ret < 0)
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, root, ret);
goto out;
+ }
if (ret != 0) {
btrfs_print_leaf(root, path->nodes[0]);
@@ -116,13 +120,10 @@ out:
return ret;
}
-int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
- *root, struct btrfs_key *key, struct btrfs_root_item
- *item)
+int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_key *key, struct btrfs_root_item *item)
{
- int ret;
- ret = btrfs_insert_item(trans, root, key, item, sizeof(*item));
- return ret;
+ return btrfs_insert_item(trans, root, key, item, sizeof(*item));
}
/*
@@ -384,6 +385,8 @@ int btrfs_find_root_ref(struct btrfs_root *tree_root,
*
* For a back ref the root_id is the id of the subvol or snapshot and
* ref_id is the id of the tree referencing it.
+ *
+ * Will return 0, -ENOMEM, or anything from the CoW path
*/
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *tree_root,
@@ -407,7 +410,11 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
again:
ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
sizeof(*ref) + name_len);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_free_path(path);
+ return ret;
+ }
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 390e7102b0ff..90acc82046c3 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -36,37 +36,30 @@
* Future enhancements:
* - In case an unrepairable extent is encountered, track which files are
* affected and report them
- * - In case of a read error on files with nodatasum, map the file and read
- * the extent to trigger a writeback of the good copy
* - track and record media errors, throw out bad devices
* - add a mode to also read unallocated space
*/
-struct scrub_bio;
-struct scrub_page;
+struct scrub_block;
struct scrub_dev;
-static void scrub_bio_end_io(struct bio *bio, int err);
-static void scrub_checksum(struct btrfs_work *work);
-static int scrub_checksum_data(struct scrub_dev *sdev,
- struct scrub_page *spag, void *buffer);
-static int scrub_checksum_tree_block(struct scrub_dev *sdev,
- struct scrub_page *spag, u64 logical,
- void *buffer);
-static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer);
-static int scrub_fixup_check(struct scrub_bio *sbio, int ix);
-static void scrub_fixup_end_io(struct bio *bio, int err);
-static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
- struct page *page);
-static void scrub_fixup(struct scrub_bio *sbio, int ix);
#define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */
#define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */
+#define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */
struct scrub_page {
+ struct scrub_block *sblock;
+ struct page *page;
+ struct block_device *bdev;
u64 flags; /* extent flags */
u64 generation;
- int mirror_num;
- int have_csum;
+ u64 logical;
+ u64 physical;
+ struct {
+ unsigned int mirror_num:8;
+ unsigned int have_csum:1;
+ unsigned int io_error:1;
+ };
u8 csum[BTRFS_CSUM_SIZE];
};
@@ -77,12 +70,25 @@ struct scrub_bio {
int err;
u64 logical;
u64 physical;
- struct scrub_page spag[SCRUB_PAGES_PER_BIO];
- u64 count;
+ struct scrub_page *pagev[SCRUB_PAGES_PER_BIO];
+ int page_count;
int next_free;
struct btrfs_work work;
};
+struct scrub_block {
+ struct scrub_page pagev[SCRUB_MAX_PAGES_PER_BLOCK];
+ int page_count;
+ atomic_t outstanding_pages;
+ atomic_t ref_count; /* free mem on transition to zero */
+ struct scrub_dev *sdev;
+ struct {
+ unsigned int header_error:1;
+ unsigned int checksum_error:1;
+ unsigned int no_io_error_seen:1;
+ };
+};
+
struct scrub_dev {
struct scrub_bio *bios[SCRUB_BIOS_PER_DEV];
struct btrfs_device *dev;
@@ -96,6 +102,10 @@ struct scrub_dev {
struct list_head csum_list;
atomic_t cancel_req;
int readonly;
+ int pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
+ u32 sectorsize;
+ u32 nodesize;
+ u32 leafsize;
/*
* statistics
*/
@@ -124,6 +134,43 @@ struct scrub_warning {
int scratch_bufsize;
};
+
+static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
+static int scrub_setup_recheck_block(struct scrub_dev *sdev,
+ struct btrfs_mapping_tree *map_tree,
+ u64 length, u64 logical,
+ struct scrub_block *sblock);
+static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
+ struct scrub_block *sblock, int is_metadata,
+ int have_csum, u8 *csum, u64 generation,
+ u16 csum_size);
+static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
+ struct scrub_block *sblock,
+ int is_metadata, int have_csum,
+ const u8 *csum, u64 generation,
+ u16 csum_size);
+static void scrub_complete_bio_end_io(struct bio *bio, int err);
+static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
+ struct scrub_block *sblock_good,
+ int force_write);
+static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
+ struct scrub_block *sblock_good,
+ int page_num, int force_write);
+static int scrub_checksum_data(struct scrub_block *sblock);
+static int scrub_checksum_tree_block(struct scrub_block *sblock);
+static int scrub_checksum_super(struct scrub_block *sblock);
+static void scrub_block_get(struct scrub_block *sblock);
+static void scrub_block_put(struct scrub_block *sblock);
+static int scrub_add_page_to_bio(struct scrub_dev *sdev,
+ struct scrub_page *spage);
+static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
+ u64 physical, u64 flags, u64 gen, int mirror_num,
+ u8 *csum, int force);
+static void scrub_bio_end_io(struct bio *bio, int err);
+static void scrub_bio_end_io_worker(struct btrfs_work *work);
+static void scrub_block_complete(struct scrub_block *sblock);
+
+
static void scrub_free_csums(struct scrub_dev *sdev)
{
while (!list_empty(&sdev->csum_list)) {
@@ -135,23 +182,6 @@ static void scrub_free_csums(struct scrub_dev *sdev)
}
}
-static void scrub_free_bio(struct bio *bio)
-{
- int i;
- struct page *last_page = NULL;
-
- if (!bio)
- return;
-
- for (i = 0; i < bio->bi_vcnt; ++i) {
- if (bio->bi_io_vec[i].bv_page == last_page)
- continue;
- last_page = bio->bi_io_vec[i].bv_page;
- __free_page(last_page);
- }
- bio_put(bio);
-}
-
static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
{
int i;
@@ -159,13 +189,23 @@ static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
if (!sdev)
return;
+ /* this can happen when scrub is cancelled */
+ if (sdev->curr != -1) {
+ struct scrub_bio *sbio = sdev->bios[sdev->curr];
+
+ for (i = 0; i < sbio->page_count; i++) {
+ BUG_ON(!sbio->pagev[i]);
+ BUG_ON(!sbio->pagev[i]->page);
+ scrub_block_put(sbio->pagev[i]->sblock);
+ }
+ bio_put(sbio->bio);
+ }
+
for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
struct scrub_bio *sbio = sdev->bios[i];
if (!sbio)
break;
-
- scrub_free_bio(sbio->bio);
kfree(sbio);
}
@@ -179,11 +219,16 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
struct scrub_dev *sdev;
int i;
struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
+ int pages_per_bio;
+ pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
+ bio_get_nr_vecs(dev->bdev));
sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
if (!sdev)
goto nomem;
sdev->dev = dev;
+ sdev->pages_per_bio = pages_per_bio;
+ sdev->curr = -1;
for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
struct scrub_bio *sbio;
@@ -194,8 +239,8 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
sbio->index = i;
sbio->sdev = sdev;
- sbio->count = 0;
- sbio->work.func = scrub_checksum;
+ sbio->page_count = 0;
+ sbio->work.func = scrub_bio_end_io_worker;
if (i != SCRUB_BIOS_PER_DEV-1)
sdev->bios[i]->next_free = i + 1;
@@ -203,7 +248,9 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
sdev->bios[i]->next_free = -1;
}
sdev->first_free = 0;
- sdev->curr = -1;
+ sdev->nodesize = dev->dev_root->nodesize;
+ sdev->leafsize = dev->dev_root->leafsize;
+ sdev->sectorsize = dev->dev_root->sectorsize;
atomic_set(&sdev->in_flight, 0);
atomic_set(&sdev->fixup_cnt, 0);
atomic_set(&sdev->cancel_req, 0);
@@ -294,10 +341,9 @@ err:
return 0;
}
-static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
- int ix)
+static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
- struct btrfs_device *dev = sbio->sdev->dev;
+ struct btrfs_device *dev = sblock->sdev->dev;
struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
struct btrfs_path *path;
struct btrfs_key found_key;
@@ -316,8 +362,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
- swarn.sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
- swarn.logical = sbio->logical + ix * PAGE_SIZE;
+ BUG_ON(sblock->page_count < 1);
+ swarn.sector = (sblock->pagev[0].physical) >> 9;
+ swarn.logical = sblock->pagev[0].logical;
swarn.errstr = errstr;
swarn.dev = dev;
swarn.msg_bufsize = bufsize;
@@ -342,7 +389,8 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
do {
ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
&ref_root, &ref_level);
- printk(KERN_WARNING "%s at logical %llu on dev %s, "
+ printk(KERN_WARNING
+ "btrfs: %s at logical %llu on dev %s, "
"sector %llu: metadata %s (level %d) in tree "
"%llu\n", errstr, swarn.logical, dev->name,
(unsigned long long)swarn.sector,
@@ -352,8 +400,8 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
} while (ret != 1);
} else {
swarn.path = path;
- iterate_extent_inodes(fs_info, path, found_key.objectid,
- extent_item_pos,
+ iterate_extent_inodes(fs_info, found_key.objectid,
+ extent_item_pos, 1,
scrub_print_warning_inode, &swarn);
}
@@ -531,9 +579,9 @@ out:
spin_lock(&sdev->stat_lock);
++sdev->stat.uncorrectable_errors;
spin_unlock(&sdev->stat_lock);
- printk_ratelimited(KERN_ERR "btrfs: unable to fixup "
- "(nodatasum) error at logical %llu\n",
- fixup->logical);
+ printk_ratelimited(KERN_ERR
+ "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
+ (unsigned long long)fixup->logical, sdev->dev->name);
}
btrfs_free_path(path);
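
The large scrub.c hunk that follows reworks error handling around the new scrub_block/scrub_page structures; the per-page repair strategy described in its long comment can be pictured with a hypothetical two-page block and two mirrors (illustration only, not patch content):

/*
 *              page 0           page 1
 *   mirror 1   I/O error        good, csum ok
 *   mirror 2   good, csum ok    I/O error
 *
 * Neither mirror is usable as a whole, but the block can be rebuilt by
 * taking page 0 from mirror 2 and page 1 from mirror 1.  Only when some
 * page offset has no error-free copy on any mirror is the block counted
 * as an uncorrectable error.
 */
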
@@ -550,91 +598,168 @@ out:
}
/*
- * scrub_recheck_error gets called when either verification of the page
- * failed or the bio failed to read, e.g. with EIO. In the latter case,
- * recheck_error gets called for every page in the bio, even though only
- * one may be bad
+ * scrub_handle_errored_block gets called when either verification of the
+ * pages failed or the bio failed to read, e.g. with EIO. In the latter
+ * case, this function handles all pages in the bio, even though only one
+ * may be bad.
+ * The goal of this function is to repair the errored block by using the
+ * contents of one of the mirrors.
*/
-static int scrub_recheck_error(struct scrub_bio *sbio, int ix)
+static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
- struct scrub_dev *sdev = sbio->sdev;
- u64 sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
+ struct scrub_dev *sdev = sblock_to_check->sdev;
+ struct btrfs_fs_info *fs_info;
+ u64 length;
+ u64 logical;
+ u64 generation;
+ unsigned int failed_mirror_index;
+ unsigned int is_metadata;
+ unsigned int have_csum;
+ u8 *csum;
+ struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
+ struct scrub_block *sblock_bad;
+ int ret;
+ int mirror_index;
+ int page_num;
+ int success;
static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
+ DEFAULT_RATELIMIT_BURST);
+
+ BUG_ON(sblock_to_check->page_count < 1);
+ fs_info = sdev->dev->dev_root->fs_info;
+ length = sblock_to_check->page_count * PAGE_SIZE;
+ logical = sblock_to_check->pagev[0].logical;
+ generation = sblock_to_check->pagev[0].generation;
+ BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
+ failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
+ is_metadata = !(sblock_to_check->pagev[0].flags &
+ BTRFS_EXTENT_FLAG_DATA);
+ have_csum = sblock_to_check->pagev[0].have_csum;
+ csum = sblock_to_check->pagev[0].csum;
- if (sbio->err) {
- if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, sector,
- sbio->bio->bi_io_vec[ix].bv_page) == 0) {
- if (scrub_fixup_check(sbio, ix) == 0)
- return 0;
- }
- if (__ratelimit(&_rs))
- scrub_print_warning("i/o error", sbio, ix);
- } else {
- if (__ratelimit(&_rs))
- scrub_print_warning("checksum error", sbio, ix);
+ /*
+	 * read all mirrors one after the other. This includes re-reading
+	 * the extent or metadata block that failed (the reason this
+	 * fixup code was called), this time page by page, in order to
+	 * know which pages caused I/O errors and which ones are good
+	 * (for all mirrors).
+	 * The goal is to handle the situation when more than one
+ * mirror contains I/O errors, but the errors do not
+ * overlap, i.e. the data can be repaired by selecting the
+ * pages from those mirrors without I/O error on the
+ * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
+ * would be that mirror #1 has an I/O error on the first page,
+ * the second page is good, and mirror #2 has an I/O error on
+ * the second page, but the first page is good.
+ * Then the first page of the first mirror can be repaired by
+ * taking the first page of the second mirror, and the
+ * second page of the second mirror can be repaired by
+ * copying the contents of the 2nd page of the 1st mirror.
+ * One more note: if the pages of one mirror contain I/O
+ * errors, the checksum cannot be verified. In order to get
+ * the best data for repairing, the first attempt is to find
+ * a mirror without I/O errors and with a validated checksum.
+	 * Only if this is not possible are the pages picked from
+ * mirrors with I/O errors without considering the checksum.
+ * If the latter is the case, at the end, the checksum of the
+ * repaired area is verified in order to correctly maintain
+ * the statistics.
+ */
+
+ sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
+ sizeof(*sblocks_for_recheck),
+ GFP_NOFS);
+ if (!sblocks_for_recheck) {
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.malloc_errors++;
+ sdev->stat.read_errors++;
+ sdev->stat.uncorrectable_errors++;
+ spin_unlock(&sdev->stat_lock);
+ goto out;
}
- spin_lock(&sdev->stat_lock);
- ++sdev->stat.read_errors;
- spin_unlock(&sdev->stat_lock);
+ /* setup the context, map the logical blocks and alloc the pages */
+ ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
+ logical, sblocks_for_recheck);
+ if (ret) {
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.read_errors++;
+ sdev->stat.uncorrectable_errors++;
+ spin_unlock(&sdev->stat_lock);
+ goto out;
+ }
+ BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
+ sblock_bad = sblocks_for_recheck + failed_mirror_index;
- scrub_fixup(sbio, ix);
- return 1;
-}
+ /* build and submit the bios for the failed mirror, check checksums */
+ ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
+ csum, generation, sdev->csum_size);
+ if (ret) {
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.read_errors++;
+ sdev->stat.uncorrectable_errors++;
+ spin_unlock(&sdev->stat_lock);
+ goto out;
+ }
-static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
-{
- int ret = 1;
- struct page *page;
- void *buffer;
- u64 flags = sbio->spag[ix].flags;
+ if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
+ sblock_bad->no_io_error_seen) {
+ /*
+ * the error disappeared after reading page by page, or
+ * the area was part of a huge bio and other parts of the
+ * bio caused I/O errors, or the block layer merged several
+ * read requests into one and the error is caused by a
+		 * different bio (usually one of the latter two cases is
+ * the cause)
+ */
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.unverified_errors++;
+ spin_unlock(&sdev->stat_lock);
- page = sbio->bio->bi_io_vec[ix].bv_page;
- buffer = kmap_atomic(page);
- if (flags & BTRFS_EXTENT_FLAG_DATA) {
- ret = scrub_checksum_data(sbio->sdev,
- sbio->spag + ix, buffer);
- } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
- ret = scrub_checksum_tree_block(sbio->sdev,
- sbio->spag + ix,
- sbio->logical + ix * PAGE_SIZE,
- buffer);
- } else {
- WARN_ON(1);
+ goto out;
}
- kunmap_atomic(buffer);
- return ret;
-}
+ if (!sblock_bad->no_io_error_seen) {
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.read_errors++;
+ spin_unlock(&sdev->stat_lock);
+ if (__ratelimit(&_rs))
+ scrub_print_warning("i/o error", sblock_to_check);
+ } else if (sblock_bad->checksum_error) {
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.csum_errors++;
+ spin_unlock(&sdev->stat_lock);
+ if (__ratelimit(&_rs))
+ scrub_print_warning("checksum error", sblock_to_check);
+ } else if (sblock_bad->header_error) {
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.verify_errors++;
+ spin_unlock(&sdev->stat_lock);
+ if (__ratelimit(&_rs))
+ scrub_print_warning("checksum/header error",
+ sblock_to_check);
+ }
-static void scrub_fixup_end_io(struct bio *bio, int err)
-{
- complete((struct completion *)bio->bi_private);
-}
+ if (sdev->readonly)
+ goto did_not_correct_error;
-static void scrub_fixup(struct scrub_bio *sbio, int ix)
-{
- struct scrub_dev *sdev = sbio->sdev;
- struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
- struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
- struct btrfs_bio *bbio = NULL;
- struct scrub_fixup_nodatasum *fixup;
- u64 logical = sbio->logical + ix * PAGE_SIZE;
- u64 length;
- int i;
- int ret;
- DECLARE_COMPLETION_ONSTACK(complete);
-
- if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) &&
- (sbio->spag[ix].have_csum == 0)) {
- fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
- if (!fixup)
- goto uncorrectable;
- fixup->sdev = sdev;
- fixup->logical = logical;
- fixup->root = fs_info->extent_root;
- fixup->mirror_num = sbio->spag[ix].mirror_num;
+ if (!is_metadata && !have_csum) {
+ struct scrub_fixup_nodatasum *fixup_nodatasum;
+
+ /*
+ * !is_metadata and !have_csum, this means that the data
+		 * might not be COW'ed and might be modified
+		 * concurrently. The general strategy of working on the
+ * commit root does not help in the case when COW is not
+ * used.
+ */
+ fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
+ if (!fixup_nodatasum)
+ goto did_not_correct_error;
+ fixup_nodatasum->sdev = sdev;
+ fixup_nodatasum->logical = logical;
+ fixup_nodatasum->root = fs_info->extent_root;
+ fixup_nodatasum->mirror_num = failed_mirror_index + 1;
/*
* increment scrubs_running to prevent cancel requests from
* completing as long as a fixup worker is running. we must also
@@ -649,235 +774,528 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
atomic_inc(&fs_info->scrubs_paused);
mutex_unlock(&fs_info->scrub_lock);
atomic_inc(&sdev->fixup_cnt);
- fixup->work.func = scrub_fixup_nodatasum;
- btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work);
- return;
+ fixup_nodatasum->work.func = scrub_fixup_nodatasum;
+ btrfs_queue_worker(&fs_info->scrub_workers,
+ &fixup_nodatasum->work);
+ goto out;
}
- length = PAGE_SIZE;
- ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
- &bbio, 0);
- if (ret || !bbio || length < PAGE_SIZE) {
- printk(KERN_ERR
- "scrub_fixup: btrfs_map_block failed us for %llu\n",
- (unsigned long long)logical);
- WARN_ON(1);
- kfree(bbio);
- return;
+ /*
+ * now build and submit the bios for the other mirrors, check
+ * checksums
+ */
+ for (mirror_index = 0;
+ mirror_index < BTRFS_MAX_MIRRORS &&
+ sblocks_for_recheck[mirror_index].page_count > 0;
+ mirror_index++) {
+ if (mirror_index == failed_mirror_index)
+ continue;
+
+ /* build and submit the bios, check checksums */
+ ret = scrub_recheck_block(fs_info,
+ sblocks_for_recheck + mirror_index,
+ is_metadata, have_csum, csum,
+ generation, sdev->csum_size);
+ if (ret)
+ goto did_not_correct_error;
}
- if (bbio->num_stripes == 1)
- /* there aren't any replicas */
- goto uncorrectable;
+ /*
+ * first try to pick the mirror which is completely without I/O
+ * errors and also does not have a checksum error.
+ * If one is found, and if a checksum is present, the full block
+ * that is known to contain an error is rewritten. Afterwards
+ * the block is known to be corrected.
+ * If a mirror is found which is completely correct, and no
+ * checksum is present, only those pages are rewritten that had
+ * an I/O error in the block to be repaired, since it cannot be
+ * determined, which copy of the other pages is better (and it
+ * could happen otherwise that a correct page would be
+ * overwritten by a bad one).
+ */
+ for (mirror_index = 0;
+ mirror_index < BTRFS_MAX_MIRRORS &&
+ sblocks_for_recheck[mirror_index].page_count > 0;
+ mirror_index++) {
+ struct scrub_block *sblock_other = sblocks_for_recheck +
+ mirror_index;
+
+ if (!sblock_other->header_error &&
+ !sblock_other->checksum_error &&
+ sblock_other->no_io_error_seen) {
+ int force_write = is_metadata || have_csum;
+
+ ret = scrub_repair_block_from_good_copy(sblock_bad,
+ sblock_other,
+ force_write);
+ if (0 == ret)
+ goto corrected_error;
+ }
+ }
/*
- * first find a good copy
+ * in case of I/O errors in the area that is supposed to be
+ * repaired, continue by picking good copies of those pages.
+ * Select the good pages from mirrors to rewrite bad pages from
+ * the area to fix. Afterwards verify the checksum of the block
+ * that is supposed to be repaired. This verification step is
+	 * only done for the purpose of statistics counting and for the
+	 * final scrub report on whether errors remain.
+ * A perfect algorithm could make use of the checksum and try
+ * all possible combinations of pages from the different mirrors
+ * until the checksum verification succeeds. For example, when
+ * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
+ * of mirror #2 is readable but the final checksum test fails,
+	 * then the 2nd page of mirror #3 could be tried, to see whether
+	 * the final checksum now succeeds. But this would be a rare
+ * exception and is therefore not implemented. At least it is
+ * avoided that the good copy is overwritten.
+ * A more useful improvement would be to pick the sectors
+ * without I/O error based on sector sizes (512 bytes on legacy
+	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
+	 * mirror could be repaired by taking 512 bytes of a different
+ * mirror, even if other 512 byte sectors in the same PAGE_SIZE
+ * area are unreadable.
*/
- for (i = 0; i < bbio->num_stripes; ++i) {
- if (i + 1 == sbio->spag[ix].mirror_num)
- continue;
- if (scrub_fixup_io(READ, bbio->stripes[i].dev->bdev,
- bbio->stripes[i].physical >> 9,
- sbio->bio->bi_io_vec[ix].bv_page)) {
- /* I/O-error, this is not a good copy */
+ /* can only fix I/O errors from here on */
+ if (sblock_bad->no_io_error_seen)
+ goto did_not_correct_error;
+
+ success = 1;
+ for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
+ struct scrub_page *page_bad = sblock_bad->pagev + page_num;
+
+ if (!page_bad->io_error)
continue;
+
+ for (mirror_index = 0;
+ mirror_index < BTRFS_MAX_MIRRORS &&
+ sblocks_for_recheck[mirror_index].page_count > 0;
+ mirror_index++) {
+ struct scrub_block *sblock_other = sblocks_for_recheck +
+ mirror_index;
+ struct scrub_page *page_other = sblock_other->pagev +
+ page_num;
+
+ if (!page_other->io_error) {
+ ret = scrub_repair_page_from_good_copy(
+ sblock_bad, sblock_other, page_num, 0);
+ if (0 == ret) {
+ page_bad->io_error = 0;
+ break; /* succeeded for this page */
+ }
+ }
}
- if (scrub_fixup_check(sbio, ix) == 0)
- break;
+ if (page_bad->io_error) {
+ /* did not find a mirror to copy the page from */
+ success = 0;
+ }
}
- if (i == bbio->num_stripes)
- goto uncorrectable;
- if (!sdev->readonly) {
- /*
- * bi_io_vec[ix].bv_page now contains good data, write it back
- */
- if (scrub_fixup_io(WRITE, sdev->dev->bdev,
- (sbio->physical + ix * PAGE_SIZE) >> 9,
- sbio->bio->bi_io_vec[ix].bv_page)) {
- /* I/O-error, writeback failed, give up */
- goto uncorrectable;
+ if (success) {
+ if (is_metadata || have_csum) {
+ /*
+ * need to verify the checksum now that all
+ * sectors on disk are repaired (the write
+ * request for data to be repaired is on its way).
+ * Just be lazy and use scrub_recheck_block()
+ * which re-reads the data before the checksum
+ * is verified, but most likely the data comes out
+ * of the page cache.
+ */
+ ret = scrub_recheck_block(fs_info, sblock_bad,
+ is_metadata, have_csum, csum,
+ generation, sdev->csum_size);
+ if (!ret && !sblock_bad->header_error &&
+ !sblock_bad->checksum_error &&
+ sblock_bad->no_io_error_seen)
+ goto corrected_error;
+ else
+ goto did_not_correct_error;
+ } else {
+corrected_error:
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.corrected_errors++;
+ spin_unlock(&sdev->stat_lock);
+ printk_ratelimited(KERN_ERR
+ "btrfs: fixed up error at logical %llu on dev %s\n",
+ (unsigned long long)logical, sdev->dev->name);
}
+ } else {
+did_not_correct_error:
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.uncorrectable_errors++;
+ spin_unlock(&sdev->stat_lock);
+ printk_ratelimited(KERN_ERR
+ "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
+ (unsigned long long)logical, sdev->dev->name);
}
- kfree(bbio);
- spin_lock(&sdev->stat_lock);
- ++sdev->stat.corrected_errors;
- spin_unlock(&sdev->stat_lock);
+out:
+ if (sblocks_for_recheck) {
+ for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
+ mirror_index++) {
+ struct scrub_block *sblock = sblocks_for_recheck +
+ mirror_index;
+ int page_index;
+
+ for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
+ page_index++)
+ if (sblock->pagev[page_index].page)
+ __free_page(
+ sblock->pagev[page_index].page);
+ }
+ kfree(sblocks_for_recheck);
+ }
- printk_ratelimited(KERN_ERR "btrfs: fixed up error at logical %llu\n",
- (unsigned long long)logical);
- return;
+ return 0;
+}
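
A note on the repair strategy implemented above: for every page of the errored block, scrub picks the same page from any mirror that read it without an I/O error. The following standalone userspace sketch models only that per-page selection; the struct names, sizes and the data in main() are invented for illustration and are not part of the patch:

#include <stdio.h>
#include <string.h>

#define MAX_MIRRORS      3
#define PAGES_PER_BLOCK  4

struct model_page  { int io_error; char data[16]; };
struct model_block { struct model_page pagev[PAGES_PER_BLOCK]; };

/* try to repair each errored page of 'bad' from any mirror without an
 * I/O error on that same page; returns 1 if every page could be fixed */
static int repair_from_mirrors(struct model_block *bad,
                               struct model_block mirrors[], int nmirrors)
{
    int success = 1;

    for (int p = 0; p < PAGES_PER_BLOCK; p++) {
        if (!bad->pagev[p].io_error)
            continue;
        for (int m = 0; m < nmirrors; m++) {
            if (mirrors[m].pagev[p].io_error)
                continue;
            memcpy(bad->pagev[p].data, mirrors[m].pagev[p].data,
                   sizeof(bad->pagev[p].data));
            bad->pagev[p].io_error = 0;
            break;                      /* this page is repaired */
        }
        if (bad->pagev[p].io_error)
            success = 0;                /* no mirror had a good copy */
    }
    return success;
}

int main(void)
{
    struct model_block bad = { .pagev = {
        { .io_error = 1 }, { 0, "good1" }, { .io_error = 1 }, { 0, "good3" } } };
    struct model_block mirrors[1] = { { .pagev = {
        { 0, "fix0" }, { .io_error = 1 }, { 0, "fix2" }, { 0, "x" } } } };

    printf("fully repaired: %d\n", repair_from_mirrors(&bad, mirrors, 1));
    return 0;
}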
-uncorrectable:
- kfree(bbio);
- spin_lock(&sdev->stat_lock);
- ++sdev->stat.uncorrectable_errors;
- spin_unlock(&sdev->stat_lock);
+static int scrub_setup_recheck_block(struct scrub_dev *sdev,
+ struct btrfs_mapping_tree *map_tree,
+ u64 length, u64 logical,
+ struct scrub_block *sblocks_for_recheck)
+{
+ int page_index;
+ int mirror_index;
+ int ret;
+
+ /*
+ * note: the three members sdev, ref_count and outstanding_pages
+ * are not used (and not set) in the blocks that are used for
+ * the recheck procedure
+ */
+
+ page_index = 0;
+ while (length > 0) {
+ u64 sublen = min_t(u64, length, PAGE_SIZE);
+ u64 mapped_length = sublen;
+ struct btrfs_bio *bbio = NULL;
+
+ /*
+ * with a length of PAGE_SIZE, each returned stripe
+ * represents one mirror
+ */
+ ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
+ &bbio, 0);
+ if (ret || !bbio || mapped_length < sublen) {
+ kfree(bbio);
+ return -EIO;
+ }
- printk_ratelimited(KERN_ERR "btrfs: unable to fixup (regular) error at "
- "logical %llu\n", (unsigned long long)logical);
+ BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
+ for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
+ mirror_index++) {
+ struct scrub_block *sblock;
+ struct scrub_page *page;
+
+ if (mirror_index >= BTRFS_MAX_MIRRORS)
+ continue;
+
+ sblock = sblocks_for_recheck + mirror_index;
+ page = sblock->pagev + page_index;
+ page->logical = logical;
+ page->physical = bbio->stripes[mirror_index].physical;
+ page->bdev = bbio->stripes[mirror_index].dev->bdev;
+ page->mirror_num = mirror_index + 1;
+ page->page = alloc_page(GFP_NOFS);
+ if (!page->page) {
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.malloc_errors++;
+ spin_unlock(&sdev->stat_lock);
+ return -ENOMEM;
+ }
+ sblock->page_count++;
+ }
+ kfree(bbio);
+ length -= sublen;
+ logical += sublen;
+ page_index++;
+ }
+
+ return 0;
}
-static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
- struct page *page)
+/*
+ * this function will check the on disk data for checksum errors, header
+ * errors and read I/O errors. If any I/O errors happen, the exact pages
+ * which are errored are marked as being bad. The goal is to enable scrub
+ * to take those pages that are not errored from all the mirrors so that
+ * the pages that are errored in the just handled mirror can be repaired.
+ */
+static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
+ struct scrub_block *sblock, int is_metadata,
+ int have_csum, u8 *csum, u64 generation,
+ u16 csum_size)
{
- struct bio *bio = NULL;
- int ret;
- DECLARE_COMPLETION_ONSTACK(complete);
+ int page_num;
+
+ sblock->no_io_error_seen = 1;
+ sblock->header_error = 0;
+ sblock->checksum_error = 0;
+
+ for (page_num = 0; page_num < sblock->page_count; page_num++) {
+ struct bio *bio;
+ int ret;
+ struct scrub_page *page = sblock->pagev + page_num;
+ DECLARE_COMPLETION_ONSTACK(complete);
+
+ BUG_ON(!page->page);
+ bio = bio_alloc(GFP_NOFS, 1);
+ bio->bi_bdev = page->bdev;
+ bio->bi_sector = page->physical >> 9;
+ bio->bi_end_io = scrub_complete_bio_end_io;
+ bio->bi_private = &complete;
+
+ ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
+ if (PAGE_SIZE != ret) {
+ bio_put(bio);
+ return -EIO;
+ }
+ btrfsic_submit_bio(READ, bio);
- bio = bio_alloc(GFP_NOFS, 1);
- bio->bi_bdev = bdev;
- bio->bi_sector = sector;
- bio_add_page(bio, page, PAGE_SIZE, 0);
- bio->bi_end_io = scrub_fixup_end_io;
- bio->bi_private = &complete;
- btrfsic_submit_bio(rw, bio);
+ /* this will also unplug the queue */
+ wait_for_completion(&complete);
- /* this will also unplug the queue */
- wait_for_completion(&complete);
+ page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ sblock->no_io_error_seen = 0;
+ bio_put(bio);
+ }
- ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
- bio_put(bio);
- return ret;
+ if (sblock->no_io_error_seen)
+ scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
+ have_csum, csum, generation,
+ csum_size);
+
+ return 0;
}
-static void scrub_bio_end_io(struct bio *bio, int err)
+static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
+ struct scrub_block *sblock,
+ int is_metadata, int have_csum,
+ const u8 *csum, u64 generation,
+ u16 csum_size)
{
- struct scrub_bio *sbio = bio->bi_private;
- struct scrub_dev *sdev = sbio->sdev;
- struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+ int page_num;
+ u8 calculated_csum[BTRFS_CSUM_SIZE];
+ u32 crc = ~(u32)0;
+ struct btrfs_root *root = fs_info->extent_root;
+ void *mapped_buffer;
+
+ BUG_ON(!sblock->pagev[0].page);
+ if (is_metadata) {
+ struct btrfs_header *h;
+
+ mapped_buffer = kmap_atomic(sblock->pagev[0].page);
+ h = (struct btrfs_header *)mapped_buffer;
+
+ if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
+ generation != le64_to_cpu(h->generation) ||
+ memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
+ memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
+ BTRFS_UUID_SIZE))
+ sblock->header_error = 1;
+ csum = h->csum;
+ } else {
+ if (!have_csum)
+ return;
- sbio->err = err;
- sbio->bio = bio;
+ mapped_buffer = kmap_atomic(sblock->pagev[0].page);
+ }
- btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
+ for (page_num = 0;;) {
+ if (page_num == 0 && is_metadata)
+ crc = btrfs_csum_data(root,
+ ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
+ crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
+ else
+ crc = btrfs_csum_data(root, mapped_buffer, crc,
+ PAGE_SIZE);
+
+ kunmap_atomic(mapped_buffer);
+ page_num++;
+ if (page_num >= sblock->page_count)
+ break;
+ BUG_ON(!sblock->pagev[page_num].page);
+
+ mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
+ }
+
+ btrfs_csum_final(crc, calculated_csum);
+ if (memcmp(calculated_csum, csum, csum_size))
+ sblock->checksum_error = 1;
}
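
The recheck/checksum helpers above walk a block one mapped page at a time and feed each piece into the same running checksum, skipping the embedded csum bytes at the start of the first page for metadata. Below is a standalone sketch of that page-by-page chaining with a plain CRC32 standing in for btrfs_csum_data(); the sizes and names are made up for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MODEL_PAGE_SIZE 64
#define CSUM_SIZE       4

/* toy stand-in for btrfs_csum_data(): fold one buffer into a running crc */
static uint32_t crc32_update(uint32_t crc, const uint8_t *buf, size_t len)
{
    crc = ~crc;
    while (len--) {
        crc ^= *buf++;
        for (int k = 0; k < 8; k++)
            crc = (crc >> 1) ^ (0xEDB88320 & -(crc & 1));
    }
    return ~crc;
}

/* checksum a block that spans several pages; for "metadata" the stored
 * checksum at the start of page 0 is excluded from the calculation */
static uint32_t block_csum(uint8_t pages[][MODEL_PAGE_SIZE], int page_count,
                           size_t block_len, int is_metadata)
{
    uint32_t crc = 0;
    size_t len = is_metadata ? block_len - CSUM_SIZE : block_len;

    for (int i = 0; i < page_count && len; i++) {
        const uint8_t *p = pages[i];
        size_t avail = MODEL_PAGE_SIZE;

        if (i == 0 && is_metadata) {        /* skip the on-disk csum */
            p += CSUM_SIZE;
            avail -= CSUM_SIZE;
        }
        if (avail > len)
            avail = len;
        crc = crc32_update(crc, p, avail);  /* chained, page by page */
        len -= avail;
    }
    return crc;
}

int main(void)
{
    uint8_t pages[2][MODEL_PAGE_SIZE];

    memset(pages, 0xab, sizeof(pages));
    printf("csum: %08x\n",
           (unsigned)block_csum(pages, 2, 2 * MODEL_PAGE_SIZE, 1));
    return 0;
}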
-static void scrub_checksum(struct btrfs_work *work)
+static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
- struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
- struct scrub_dev *sdev = sbio->sdev;
- struct page *page;
- void *buffer;
- int i;
- u64 flags;
- u64 logical;
- int ret;
+ complete((struct completion *)bio->bi_private);
+}
- if (sbio->err) {
- ret = 0;
- for (i = 0; i < sbio->count; ++i)
- ret |= scrub_recheck_error(sbio, i);
- if (!ret) {
- spin_lock(&sdev->stat_lock);
- ++sdev->stat.unverified_errors;
- spin_unlock(&sdev->stat_lock);
- }
+static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
+ struct scrub_block *sblock_good,
+ int force_write)
+{
+ int page_num;
+ int ret = 0;
- sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
- sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
- sbio->bio->bi_phys_segments = 0;
- sbio->bio->bi_idx = 0;
+ for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
+ int ret_sub;
- for (i = 0; i < sbio->count; i++) {
- struct bio_vec *bi;
- bi = &sbio->bio->bi_io_vec[i];
- bi->bv_offset = 0;
- bi->bv_len = PAGE_SIZE;
- }
- goto out;
+ ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
+ sblock_good,
+ page_num,
+ force_write);
+ if (ret_sub)
+ ret = ret_sub;
}
- for (i = 0; i < sbio->count; ++i) {
- page = sbio->bio->bi_io_vec[i].bv_page;
- buffer = kmap_atomic(page);
- flags = sbio->spag[i].flags;
- logical = sbio->logical + i * PAGE_SIZE;
- ret = 0;
- if (flags & BTRFS_EXTENT_FLAG_DATA) {
- ret = scrub_checksum_data(sdev, sbio->spag + i, buffer);
- } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
- ret = scrub_checksum_tree_block(sdev, sbio->spag + i,
- logical, buffer);
- } else if (flags & BTRFS_EXTENT_FLAG_SUPER) {
- BUG_ON(i);
- (void)scrub_checksum_super(sbio, buffer);
- } else {
- WARN_ON(1);
- }
- kunmap_atomic(buffer);
- if (ret) {
- ret = scrub_recheck_error(sbio, i);
- if (!ret) {
- spin_lock(&sdev->stat_lock);
- ++sdev->stat.unverified_errors;
- spin_unlock(&sdev->stat_lock);
- }
+
+ return ret;
+}
+
+static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
+ struct scrub_block *sblock_good,
+ int page_num, int force_write)
+{
+ struct scrub_page *page_bad = sblock_bad->pagev + page_num;
+ struct scrub_page *page_good = sblock_good->pagev + page_num;
+
+ BUG_ON(sblock_bad->pagev[page_num].page == NULL);
+ BUG_ON(sblock_good->pagev[page_num].page == NULL);
+ if (force_write || sblock_bad->header_error ||
+ sblock_bad->checksum_error || page_bad->io_error) {
+ struct bio *bio;
+ int ret;
+ DECLARE_COMPLETION_ONSTACK(complete);
+
+ bio = bio_alloc(GFP_NOFS, 1);
+ bio->bi_bdev = page_bad->bdev;
+ bio->bi_sector = page_bad->physical >> 9;
+ bio->bi_end_io = scrub_complete_bio_end_io;
+ bio->bi_private = &complete;
+
+ ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
+ if (PAGE_SIZE != ret) {
+ bio_put(bio);
+ return -EIO;
}
+ btrfsic_submit_bio(WRITE, bio);
+
+ /* this will also unplug the queue */
+ wait_for_completion(&complete);
+ bio_put(bio);
}
-out:
- scrub_free_bio(sbio->bio);
- sbio->bio = NULL;
- spin_lock(&sdev->list_lock);
- sbio->next_free = sdev->first_free;
- sdev->first_free = sbio->index;
- spin_unlock(&sdev->list_lock);
- atomic_dec(&sdev->in_flight);
- wake_up(&sdev->list_wait);
+ return 0;
+}
+
+static void scrub_checksum(struct scrub_block *sblock)
+{
+ u64 flags;
+ int ret;
+
+ BUG_ON(sblock->page_count < 1);
+ flags = sblock->pagev[0].flags;
+ ret = 0;
+ if (flags & BTRFS_EXTENT_FLAG_DATA)
+ ret = scrub_checksum_data(sblock);
+ else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+ ret = scrub_checksum_tree_block(sblock);
+ else if (flags & BTRFS_EXTENT_FLAG_SUPER)
+ (void)scrub_checksum_super(sblock);
+ else
+ WARN_ON(1);
+ if (ret)
+ scrub_handle_errored_block(sblock);
}
-static int scrub_checksum_data(struct scrub_dev *sdev,
- struct scrub_page *spag, void *buffer)
+static int scrub_checksum_data(struct scrub_block *sblock)
{
+ struct scrub_dev *sdev = sblock->sdev;
u8 csum[BTRFS_CSUM_SIZE];
+ u8 *on_disk_csum;
+ struct page *page;
+ void *buffer;
u32 crc = ~(u32)0;
int fail = 0;
struct btrfs_root *root = sdev->dev->dev_root;
+ u64 len;
+ int index;
- if (!spag->have_csum)
+ BUG_ON(sblock->page_count < 1);
+ if (!sblock->pagev[0].have_csum)
return 0;
- crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE);
+ on_disk_csum = sblock->pagev[0].csum;
+ page = sblock->pagev[0].page;
+ buffer = kmap_atomic(page);
+
+ len = sdev->sectorsize;
+ index = 0;
+ for (;;) {
+ u64 l = min_t(u64, len, PAGE_SIZE);
+
+ crc = btrfs_csum_data(root, buffer, crc, l);
+ kunmap_atomic(buffer);
+ len -= l;
+ if (len == 0)
+ break;
+ index++;
+ BUG_ON(index >= sblock->page_count);
+ BUG_ON(!sblock->pagev[index].page);
+ page = sblock->pagev[index].page;
+ buffer = kmap_atomic(page);
+ }
+
btrfs_csum_final(crc, csum);
- if (memcmp(csum, spag->csum, sdev->csum_size))
+ if (memcmp(csum, on_disk_csum, sdev->csum_size))
fail = 1;
- spin_lock(&sdev->stat_lock);
- ++sdev->stat.data_extents_scrubbed;
- sdev->stat.data_bytes_scrubbed += PAGE_SIZE;
- if (fail)
+ if (fail) {
+ spin_lock(&sdev->stat_lock);
++sdev->stat.csum_errors;
- spin_unlock(&sdev->stat_lock);
+ spin_unlock(&sdev->stat_lock);
+ }
return fail;
}
-static int scrub_checksum_tree_block(struct scrub_dev *sdev,
- struct scrub_page *spag, u64 logical,
- void *buffer)
+static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
+ struct scrub_dev *sdev = sblock->sdev;
struct btrfs_header *h;
struct btrfs_root *root = sdev->dev->dev_root;
struct btrfs_fs_info *fs_info = root->fs_info;
- u8 csum[BTRFS_CSUM_SIZE];
+ u8 calculated_csum[BTRFS_CSUM_SIZE];
+ u8 on_disk_csum[BTRFS_CSUM_SIZE];
+ struct page *page;
+ void *mapped_buffer;
+ u64 mapped_size;
+ void *p;
u32 crc = ~(u32)0;
int fail = 0;
int crc_fail = 0;
+ u64 len;
+ int index;
+
+ BUG_ON(sblock->page_count < 1);
+ page = sblock->pagev[0].page;
+ mapped_buffer = kmap_atomic(page);
+ h = (struct btrfs_header *)mapped_buffer;
+ memcpy(on_disk_csum, h->csum, sdev->csum_size);
/*
* we don't use the getter functions here, as we
* a) don't have an extent buffer and
* b) the page is already kmapped
*/
- h = (struct btrfs_header *)buffer;
- if (logical != le64_to_cpu(h->bytenr))
+ if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr))
++fail;
- if (spag->generation != le64_to_cpu(h->generation))
+ if (sblock->pagev[0].generation != le64_to_cpu(h->generation))
++fail;
if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
@@ -887,51 +1305,99 @@ static int scrub_checksum_tree_block(struct scrub_dev *sdev,
BTRFS_UUID_SIZE))
++fail;
- crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
- PAGE_SIZE - BTRFS_CSUM_SIZE);
- btrfs_csum_final(crc, csum);
- if (memcmp(csum, h->csum, sdev->csum_size))
+ BUG_ON(sdev->nodesize != sdev->leafsize);
+ len = sdev->nodesize - BTRFS_CSUM_SIZE;
+ mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
+ p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
+ index = 0;
+ for (;;) {
+ u64 l = min_t(u64, len, mapped_size);
+
+ crc = btrfs_csum_data(root, p, crc, l);
+ kunmap_atomic(mapped_buffer);
+ len -= l;
+ if (len == 0)
+ break;
+ index++;
+ BUG_ON(index >= sblock->page_count);
+ BUG_ON(!sblock->pagev[index].page);
+ page = sblock->pagev[index].page;
+ mapped_buffer = kmap_atomic(page);
+ mapped_size = PAGE_SIZE;
+ p = mapped_buffer;
+ }
+
+ btrfs_csum_final(crc, calculated_csum);
+ if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
++crc_fail;
- spin_lock(&sdev->stat_lock);
- ++sdev->stat.tree_extents_scrubbed;
- sdev->stat.tree_bytes_scrubbed += PAGE_SIZE;
- if (crc_fail)
- ++sdev->stat.csum_errors;
- if (fail)
- ++sdev->stat.verify_errors;
- spin_unlock(&sdev->stat_lock);
+ if (crc_fail || fail) {
+ spin_lock(&sdev->stat_lock);
+ if (crc_fail)
+ ++sdev->stat.csum_errors;
+ if (fail)
+ ++sdev->stat.verify_errors;
+ spin_unlock(&sdev->stat_lock);
+ }
return fail || crc_fail;
}
-static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
+static int scrub_checksum_super(struct scrub_block *sblock)
{
struct btrfs_super_block *s;
- u64 logical;
- struct scrub_dev *sdev = sbio->sdev;
+ struct scrub_dev *sdev = sblock->sdev;
struct btrfs_root *root = sdev->dev->dev_root;
struct btrfs_fs_info *fs_info = root->fs_info;
- u8 csum[BTRFS_CSUM_SIZE];
+ u8 calculated_csum[BTRFS_CSUM_SIZE];
+ u8 on_disk_csum[BTRFS_CSUM_SIZE];
+ struct page *page;
+ void *mapped_buffer;
+ u64 mapped_size;
+ void *p;
u32 crc = ~(u32)0;
int fail = 0;
+ u64 len;
+ int index;
- s = (struct btrfs_super_block *)buffer;
- logical = sbio->logical;
+ BUG_ON(sblock->page_count < 1);
+ page = sblock->pagev[0].page;
+ mapped_buffer = kmap_atomic(page);
+ s = (struct btrfs_super_block *)mapped_buffer;
+ memcpy(on_disk_csum, s->csum, sdev->csum_size);
- if (logical != le64_to_cpu(s->bytenr))
+ if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
++fail;
- if (sbio->spag[0].generation != le64_to_cpu(s->generation))
+ if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
++fail;
if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
++fail;
- crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
- PAGE_SIZE - BTRFS_CSUM_SIZE);
- btrfs_csum_final(crc, csum);
- if (memcmp(csum, s->csum, sbio->sdev->csum_size))
+ len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
+ mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
+ p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
+ index = 0;
+ for (;;) {
+ u64 l = min_t(u64, len, mapped_size);
+
+ crc = btrfs_csum_data(root, p, crc, l);
+ kunmap_atomic(mapped_buffer);
+ len -= l;
+ if (len == 0)
+ break;
+ index++;
+ BUG_ON(index >= sblock->page_count);
+ BUG_ON(!sblock->pagev[index].page);
+ page = sblock->pagev[index].page;
+ mapped_buffer = kmap_atomic(page);
+ mapped_size = PAGE_SIZE;
+ p = mapped_buffer;
+ }
+
+ btrfs_csum_final(crc, calculated_csum);
+ if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
++fail;
if (fail) {
@@ -948,29 +1414,42 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
return fail;
}
-static int scrub_submit(struct scrub_dev *sdev)
+static void scrub_block_get(struct scrub_block *sblock)
+{
+ atomic_inc(&sblock->ref_count);
+}
+
+static void scrub_block_put(struct scrub_block *sblock)
+{
+ if (atomic_dec_and_test(&sblock->ref_count)) {
+ int i;
+
+ for (i = 0; i < sblock->page_count; i++)
+ if (sblock->pagev[i].page)
+ __free_page(sblock->pagev[i].page);
+ kfree(sblock);
+ }
+}
+
+static void scrub_submit(struct scrub_dev *sdev)
{
struct scrub_bio *sbio;
if (sdev->curr == -1)
- return 0;
+ return;
sbio = sdev->bios[sdev->curr];
- sbio->err = 0;
sdev->curr = -1;
atomic_inc(&sdev->in_flight);
btrfsic_submit_bio(READ, sbio->bio);
-
- return 0;
}
-static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
- u64 physical, u64 flags, u64 gen, int mirror_num,
- u8 *csum, int force)
+static int scrub_add_page_to_bio(struct scrub_dev *sdev,
+ struct scrub_page *spage)
{
+ struct scrub_block *sblock = spage->sblock;
struct scrub_bio *sbio;
- struct page *page;
int ret;
again:
@@ -983,7 +1462,7 @@ again:
if (sdev->curr != -1) {
sdev->first_free = sdev->bios[sdev->curr]->next_free;
sdev->bios[sdev->curr]->next_free = -1;
- sdev->bios[sdev->curr]->count = 0;
+ sdev->bios[sdev->curr]->page_count = 0;
spin_unlock(&sdev->list_lock);
} else {
spin_unlock(&sdev->list_lock);
@@ -991,62 +1470,200 @@ again:
}
}
sbio = sdev->bios[sdev->curr];
- if (sbio->count == 0) {
+ if (sbio->page_count == 0) {
struct bio *bio;
- sbio->physical = physical;
- sbio->logical = logical;
- bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
- if (!bio)
- return -ENOMEM;
+ sbio->physical = spage->physical;
+ sbio->logical = spage->logical;
+ bio = sbio->bio;
+ if (!bio) {
+ bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
+ if (!bio)
+ return -ENOMEM;
+ sbio->bio = bio;
+ }
bio->bi_private = sbio;
bio->bi_end_io = scrub_bio_end_io;
bio->bi_bdev = sdev->dev->bdev;
- bio->bi_sector = sbio->physical >> 9;
+ bio->bi_sector = spage->physical >> 9;
sbio->err = 0;
- sbio->bio = bio;
- } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
- sbio->logical + sbio->count * PAGE_SIZE != logical) {
- ret = scrub_submit(sdev);
- if (ret)
- return ret;
+ } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
+ spage->physical ||
+ sbio->logical + sbio->page_count * PAGE_SIZE !=
+ spage->logical) {
+ scrub_submit(sdev);
goto again;
}
- sbio->spag[sbio->count].flags = flags;
- sbio->spag[sbio->count].generation = gen;
- sbio->spag[sbio->count].have_csum = 0;
- sbio->spag[sbio->count].mirror_num = mirror_num;
-
- page = alloc_page(GFP_NOFS);
- if (!page)
- return -ENOMEM;
- ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
- if (!ret) {
- __free_page(page);
- ret = scrub_submit(sdev);
- if (ret)
- return ret;
+ sbio->pagev[sbio->page_count] = spage;
+ ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
+ if (ret != PAGE_SIZE) {
+ if (sbio->page_count < 1) {
+ bio_put(sbio->bio);
+ sbio->bio = NULL;
+ return -EIO;
+ }
+ scrub_submit(sdev);
goto again;
}
- if (csum) {
- sbio->spag[sbio->count].have_csum = 1;
- memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
+ scrub_block_get(sblock); /* one for the added page */
+ atomic_inc(&sblock->outstanding_pages);
+ sbio->page_count++;
+ if (sbio->page_count == sdev->pages_per_bio)
+ scrub_submit(sdev);
+
+ return 0;
+}
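
scrub_add_page_to_bio() above keeps adding pages to the current bio as long as they extend the physically/logically contiguous run, and submits early when contiguity breaks or pages_per_bio is reached. A small userspace sketch of that batching rule, using a plain struct instead of a bio; the constants and offsets are invented:

#include <stdio.h>

#define MODEL_PAGE    4096ULL
#define PAGES_PER_BIO 4

struct model_bio {
    unsigned long long physical;        /* start of the contiguous run */
    int page_count;
};

static void submit(struct model_bio *bio)
{
    if (!bio->page_count)
        return;
    printf("submit bio: %d page(s) starting at %llu\n",
           bio->page_count, bio->physical);
    bio->page_count = 0;
}

/* add one page; flush first if it does not extend the current run */
static void add_page(struct model_bio *bio, unsigned long long physical)
{
    if (bio->page_count &&
        bio->physical + bio->page_count * MODEL_PAGE != physical)
        submit(bio);                    /* not contiguous: start a new bio */
    if (!bio->page_count)
        bio->physical = physical;
    bio->page_count++;
    if (bio->page_count == PAGES_PER_BIO)
        submit(bio);                    /* bio is full */
}

int main(void)
{
    struct model_bio bio = { 0 };
    unsigned long long offsets[] = { 0, 4096, 8192, 65536, 69632 };

    for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
        add_page(&bio, offsets[i]);
    submit(&bio);       /* flush the tail, like a final scrub_submit() */
    return 0;
}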
+
+static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
+ u64 physical, u64 flags, u64 gen, int mirror_num,
+ u8 *csum, int force)
+{
+ struct scrub_block *sblock;
+ int index;
+
+ sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+ if (!sblock) {
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.malloc_errors++;
+ spin_unlock(&sdev->stat_lock);
+ return -ENOMEM;
}
- ++sbio->count;
- if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
+
+ /* one ref inside this function, plus one for each page later on */
+ atomic_set(&sblock->ref_count, 1);
+ sblock->sdev = sdev;
+ sblock->no_io_error_seen = 1;
+
+ for (index = 0; len > 0; index++) {
+ struct scrub_page *spage = sblock->pagev + index;
+ u64 l = min_t(u64, len, PAGE_SIZE);
+
+ BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
+ spage->page = alloc_page(GFP_NOFS);
+ if (!spage->page) {
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.malloc_errors++;
+ spin_unlock(&sdev->stat_lock);
+ while (index > 0) {
+ index--;
+ __free_page(sblock->pagev[index].page);
+ }
+ kfree(sblock);
+ return -ENOMEM;
+ }
+ spage->sblock = sblock;
+ spage->bdev = sdev->dev->bdev;
+ spage->flags = flags;
+ spage->generation = gen;
+ spage->logical = logical;
+ spage->physical = physical;
+ spage->mirror_num = mirror_num;
+ if (csum) {
+ spage->have_csum = 1;
+ memcpy(spage->csum, csum, sdev->csum_size);
+ } else {
+ spage->have_csum = 0;
+ }
+ sblock->page_count++;
+ len -= l;
+ logical += l;
+ physical += l;
+ }
+
+ BUG_ON(sblock->page_count == 0);
+ for (index = 0; index < sblock->page_count; index++) {
+ struct scrub_page *spage = sblock->pagev + index;
int ret;
- ret = scrub_submit(sdev);
- if (ret)
+ ret = scrub_add_page_to_bio(sdev, spage);
+ if (ret) {
+ scrub_block_put(sblock);
return ret;
+ }
}
+ if (force)
+ scrub_submit(sdev);
+
+ /* last one frees, either here or in bio completion for last page */
+ scrub_block_put(sblock);
return 0;
}
+static void scrub_bio_end_io(struct bio *bio, int err)
+{
+ struct scrub_bio *sbio = bio->bi_private;
+ struct scrub_dev *sdev = sbio->sdev;
+ struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+
+ sbio->err = err;
+ sbio->bio = bio;
+
+ btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
+}
+
+static void scrub_bio_end_io_worker(struct btrfs_work *work)
+{
+ struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
+ struct scrub_dev *sdev = sbio->sdev;
+ int i;
+
+ BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
+ if (sbio->err) {
+ for (i = 0; i < sbio->page_count; i++) {
+ struct scrub_page *spage = sbio->pagev[i];
+
+ spage->io_error = 1;
+ spage->sblock->no_io_error_seen = 0;
+ }
+ }
+
+ /* now complete the scrub_block items that have all pages completed */
+ for (i = 0; i < sbio->page_count; i++) {
+ struct scrub_page *spage = sbio->pagev[i];
+ struct scrub_block *sblock = spage->sblock;
+
+ if (atomic_dec_and_test(&sblock->outstanding_pages))
+ scrub_block_complete(sblock);
+ scrub_block_put(sblock);
+ }
+
+ if (sbio->err) {
+ /* what is this good for??? */
+ sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+ sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
+ sbio->bio->bi_phys_segments = 0;
+ sbio->bio->bi_idx = 0;
+
+ for (i = 0; i < sbio->page_count; i++) {
+ struct bio_vec *bi;
+ bi = &sbio->bio->bi_io_vec[i];
+ bi->bv_offset = 0;
+ bi->bv_len = PAGE_SIZE;
+ }
+ }
+
+ bio_put(sbio->bio);
+ sbio->bio = NULL;
+ spin_lock(&sdev->list_lock);
+ sbio->next_free = sdev->first_free;
+ sdev->first_free = sbio->index;
+ spin_unlock(&sdev->list_lock);
+ atomic_dec(&sdev->in_flight);
+ wake_up(&sdev->list_wait);
+}
+
+static void scrub_block_complete(struct scrub_block *sblock)
+{
+ if (!sblock->no_io_error_seen)
+ scrub_handle_errored_block(sblock);
+ else
+ scrub_checksum(sblock);
+}
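
Taken together, scrub_pages(), scrub_block_get/put() and scrub_bio_end_io_worker() implement a simple lifecycle: the creator holds one reference plus one per queued page, every completed page decrements outstanding_pages and drops its reference, and whoever brings the outstanding count to zero runs scrub_block_complete(). A single-threaded userspace model of that lifecycle (plain counters stand in for the kernel's atomics; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct model_block {
    int ref_count;          /* frees the block when it drops to zero */
    int outstanding_pages;  /* completes the block when it drops to zero */
    int page_count;
};

static void block_complete(struct model_block *b)
{
    printf("all %d pages done -> verify checksums / handle errors\n",
           b->page_count);
}

static void block_put(struct model_block *b)
{
    if (--b->ref_count == 0) {
        printf("last reference dropped -> free block\n");
        free(b);
    }
}

/* called once per page when its bio completes (end_io worker) */
static void page_io_done(struct model_block *b)
{
    if (--b->outstanding_pages == 0)
        block_complete(b);
    block_put(b);                       /* drop the per-page reference */
}

int main(void)
{
    struct model_block *b = calloc(1, sizeof(*b));

    if (!b)
        return 1;
    b->ref_count = 1;                   /* creator's reference */
    b->page_count = 3;
    for (int i = 0; i < b->page_count; i++) {
        b->ref_count++;                 /* one ref per queued page */
        b->outstanding_pages++;
    }

    for (int i = 0; i < b->page_count; i++)
        page_io_done(b);                /* I/O completions arrive */

    block_put(b);                       /* creator drops its reference */
    return 0;
}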
+
static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
u8 *csum)
{
@@ -1054,7 +1671,6 @@ static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
int ret = 0;
unsigned long i;
unsigned long num_sectors;
- u32 sectorsize = sdev->dev->dev_root->sectorsize;
while (!list_empty(&sdev->csum_list)) {
sum = list_first_entry(&sdev->csum_list,
@@ -1072,7 +1688,7 @@ static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
if (!sum)
return 0;
- num_sectors = sum->len / sectorsize;
+ num_sectors = sum->len / sdev->sectorsize;
for (i = 0; i < num_sectors; ++i) {
if (sum->sums[i].bytenr == logical) {
memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
@@ -1093,9 +1709,28 @@ static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
{
int ret;
u8 csum[BTRFS_CSUM_SIZE];
+ u32 blocksize;
+
+ if (flags & BTRFS_EXTENT_FLAG_DATA) {
+ blocksize = sdev->sectorsize;
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.data_extents_scrubbed++;
+ sdev->stat.data_bytes_scrubbed += len;
+ spin_unlock(&sdev->stat_lock);
+ } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ BUG_ON(sdev->nodesize != sdev->leafsize);
+ blocksize = sdev->nodesize;
+ spin_lock(&sdev->stat_lock);
+ sdev->stat.tree_extents_scrubbed++;
+ sdev->stat.tree_bytes_scrubbed += len;
+ spin_unlock(&sdev->stat_lock);
+ } else {
+ blocksize = sdev->sectorsize;
+ BUG_ON(1);
+ }
while (len) {
- u64 l = min_t(u64, len, PAGE_SIZE);
+ u64 l = min_t(u64, len, blocksize);
int have_csum = 0;
if (flags & BTRFS_EXTENT_FLAG_DATA) {
@@ -1104,8 +1739,8 @@ static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
if (have_csum == 0)
++sdev->stat.no_csum;
}
- ret = scrub_page(sdev, logical, l, physical, flags, gen,
- mirror_num, have_csum ? csum : NULL, 0);
+ ret = scrub_pages(sdev, logical, l, physical, flags, gen,
+ mirror_num, have_csum ? csum : NULL, 0);
if (ret)
return ret;
len -= l;
@@ -1170,6 +1805,11 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
if (!path)
return -ENOMEM;
+ /*
+	 * work on the commit root. The related disk blocks are static as
+	 * long as COW is applied. This means it is safe to rewrite
+ * them to repair disk errors without any race conditions
+ */
path->search_commit_root = 1;
path->skip_locking = 1;
@@ -1516,15 +2156,18 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
struct btrfs_device *device = sdev->dev;
struct btrfs_root *root = device->dev_root;
+ if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+ return -EIO;
+
gen = root->fs_info->last_trans_committed;
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
- if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
+ if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
break;
- ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr,
- BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
+ ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
+ BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
if (ret)
return ret;
}
@@ -1583,10 +2226,30 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
/*
* check some assumptions
*/
- if (root->sectorsize != PAGE_SIZE ||
- root->sectorsize != root->leafsize ||
- root->sectorsize != root->nodesize) {
- printk(KERN_ERR "btrfs_scrub: size assumptions fail\n");
+ if (root->nodesize != root->leafsize) {
+ printk(KERN_ERR
+ "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
+ root->nodesize, root->leafsize);
+ return -EINVAL;
+ }
+
+ if (root->nodesize > BTRFS_STRIPE_LEN) {
+ /*
+ * in this case scrub is unable to calculate the checksum
+		 * the way it is currently implemented. Do not handle this
+ * situation at all because it won't ever happen.
+ */
+ printk(KERN_ERR
+ "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
+ root->nodesize, BTRFS_STRIPE_LEN);
+ return -EINVAL;
+ }
+
+ if (root->sectorsize != PAGE_SIZE) {
+ /* not supported for data w/o checksums */
+ printk(KERN_ERR
+ "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lld) fails\n",
+ root->sectorsize, (unsigned long long)PAGE_SIZE);
return -EINVAL;
}
@@ -1656,7 +2319,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
return ret;
}
-int btrfs_scrub_pause(struct btrfs_root *root)
+void btrfs_scrub_pause(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1671,34 +2334,28 @@ int btrfs_scrub_pause(struct btrfs_root *root)
mutex_lock(&fs_info->scrub_lock);
}
mutex_unlock(&fs_info->scrub_lock);
-
- return 0;
}
-int btrfs_scrub_continue(struct btrfs_root *root)
+void btrfs_scrub_continue(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
atomic_dec(&fs_info->scrub_pause_req);
wake_up(&fs_info->scrub_pause_wait);
- return 0;
}
-int btrfs_scrub_pause_super(struct btrfs_root *root)
+void btrfs_scrub_pause_super(struct btrfs_root *root)
{
down_write(&root->fs_info->scrub_super_lock);
- return 0;
}
-int btrfs_scrub_continue_super(struct btrfs_root *root)
+void btrfs_scrub_continue_super(struct btrfs_root *root)
{
up_write(&root->fs_info->scrub_super_lock);
- return 0;
}
-int btrfs_scrub_cancel(struct btrfs_root *root)
+int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
mutex_lock(&fs_info->scrub_lock);
if (!atomic_read(&fs_info->scrubs_running)) {
@@ -1719,6 +2376,11 @@ int btrfs_scrub_cancel(struct btrfs_root *root)
return 0;
}
+int btrfs_scrub_cancel(struct btrfs_root *root)
+{
+ return __btrfs_scrub_cancel(root->fs_info);
+}
+
int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1741,6 +2403,7 @@ int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
return 0;
}
+
int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
{
struct btrfs_fs_info *fs_info = root->fs_info;
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index bc1f6ad18442..c6ffa5812419 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -44,8 +44,9 @@
#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
u##bits btrfs_##name(struct extent_buffer *eb, type *s); \
void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val); \
-u##bits btrfs_##name(struct extent_buffer *eb, \
- type *s) \
+void btrfs_set_token_##name(struct extent_buffer *eb, type *s, u##bits val, struct btrfs_map_token *token); \
+u##bits btrfs_token_##name(struct extent_buffer *eb, \
+ type *s, struct btrfs_map_token *token) \
{ \
unsigned long part_offset = (unsigned long)s; \
unsigned long offset = part_offset + offsetof(type, member); \
@@ -54,9 +55,18 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
char *kaddr; \
unsigned long map_start; \
unsigned long map_len; \
+ unsigned long mem_len = sizeof(((type *)0)->member); \
u##bits res; \
+ if (token && token->kaddr && token->offset <= offset && \
+ token->eb == eb && \
+ (token->offset + PAGE_CACHE_SIZE >= offset + mem_len)) { \
+ kaddr = token->kaddr; \
+ p = (type *)(kaddr + part_offset - token->offset); \
+ res = le##bits##_to_cpu(p->member); \
+ return res; \
+ } \
err = map_private_extent_buffer(eb, offset, \
- sizeof(((type *)0)->member), \
+ mem_len, \
&kaddr, &map_start, &map_len); \
if (err) { \
__le##bits leres; \
@@ -65,10 +75,15 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
} \
p = (type *)(kaddr + part_offset - map_start); \
res = le##bits##_to_cpu(p->member); \
+ if (token) { \
+ token->kaddr = kaddr; \
+ token->offset = map_start; \
+ token->eb = eb; \
+ } \
return res; \
} \
-void btrfs_set_##name(struct extent_buffer *eb, \
- type *s, u##bits val) \
+void btrfs_set_token_##name(struct extent_buffer *eb, \
+ type *s, u##bits val, struct btrfs_map_token *token) \
{ \
unsigned long part_offset = (unsigned long)s; \
unsigned long offset = part_offset + offsetof(type, member); \
@@ -77,8 +92,17 @@ void btrfs_set_##name(struct extent_buffer *eb, \
char *kaddr; \
unsigned long map_start; \
unsigned long map_len; \
+ unsigned long mem_len = sizeof(((type *)0)->member); \
+ if (token && token->kaddr && token->offset <= offset && \
+ token->eb == eb && \
+ (token->offset + PAGE_CACHE_SIZE >= offset + mem_len)) { \
+ kaddr = token->kaddr; \
+ p = (type *)(kaddr + part_offset - token->offset); \
+ p->member = cpu_to_le##bits(val); \
+ return; \
+ } \
err = map_private_extent_buffer(eb, offset, \
- sizeof(((type *)0)->member), \
+ mem_len, \
&kaddr, &map_start, &map_len); \
if (err) { \
__le##bits val2; \
@@ -88,7 +112,22 @@ void btrfs_set_##name(struct extent_buffer *eb, \
} \
p = (type *)(kaddr + part_offset - map_start); \
p->member = cpu_to_le##bits(val); \
-}
+ if (token) { \
+ token->kaddr = kaddr; \
+ token->offset = map_start; \
+ token->eb = eb; \
+ } \
+} \
+void btrfs_set_##name(struct extent_buffer *eb, \
+ type *s, u##bits val) \
+{ \
+ btrfs_set_token_##name(eb, s, val, NULL); \
+} \
+u##bits btrfs_##name(struct extent_buffer *eb, \
+ type *s) \
+{ \
+ return btrfs_token_##name(eb, s, NULL); \
+} \
#include "ctree.h"
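
The reworked BTRFS_SETGET_FUNCS macros above add a btrfs_map_token that remembers the last mapped page (eb, kaddr, offset), so consecutive field accesses on the same extent-buffer page can skip map_private_extent_buffer(). Here is the same cache-the-last-mapping idea as a standalone userspace sketch; the types, the fake map_page() helper and the page size are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MODEL_PAGE 4096

struct model_eb { uint8_t data[2 * MODEL_PAGE]; };

struct map_token {
    struct model_eb *eb;
    char *kaddr;            /* start of the cached "mapped" page */
    unsigned long offset;   /* byte offset of that page inside eb */
};

static int map_calls;       /* counts how often the slow path runs */

/* slow path: pretend to map the page containing 'offset' */
static char *map_page(struct model_eb *eb, unsigned long offset,
                      unsigned long *map_start)
{
    map_calls++;
    *map_start = offset & ~(unsigned long)(MODEL_PAGE - 1);
    return (char *)eb->data + *map_start;
}

static uint32_t get_u32(struct model_eb *eb, unsigned long offset,
                        struct map_token *token)
{
    unsigned long map_start;
    char *kaddr;
    uint32_t val;

    if (token && token->kaddr && token->eb == eb &&
        token->offset <= offset &&
        token->offset + MODEL_PAGE >= offset + sizeof(val)) {
        /* fast path: reuse the page cached in the token */
        memcpy(&val, token->kaddr + (offset - token->offset), sizeof(val));
        return val;
    }
    kaddr = map_page(eb, offset, &map_start);
    memcpy(&val, kaddr + (offset - map_start), sizeof(val));
    if (token) {            /* remember the mapping for the next access */
        token->eb = eb;
        token->kaddr = kaddr;
        token->offset = map_start;
    }
    return val;
}

int main(void)
{
    struct model_eb eb = { 0 };
    struct map_token token = { 0 };

    for (unsigned long off = 0; off < 64; off += 4)
        get_u32(&eb, off, &token);      /* same page: mapped only once */
    printf("map_page() called %d time(s) for 16 reads\n", map_calls);
    return 0;
}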
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 81df3fec6a6d..8d5d380f7bdb 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -76,6 +76,9 @@ static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,
case -EROFS:
errstr = "Readonly filesystem";
break;
+ case -EEXIST:
+ errstr = "Object already exists";
+ break;
default:
if (nbuf) {
if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
@@ -116,6 +119,8 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
sb->s_flags |= MS_RDONLY;
printk(KERN_INFO "btrfs is forced readonly\n");
+ __btrfs_scrub_cancel(fs_info);
+// WARN_ON(1);
}
}
@@ -124,25 +129,132 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
 * invokes the appropriate error response.
*/
void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
- unsigned int line, int errno)
+ unsigned int line, int errno, const char *fmt, ...)
{
struct super_block *sb = fs_info->sb;
char nbuf[16];
const char *errstr;
+ va_list args;
+ va_start(args, fmt);
/*
* Special case: if the error is EROFS, and we're already
* under MS_RDONLY, then it is safe here.
*/
if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
+ return;
+
+ errstr = btrfs_decode_error(fs_info, errno, nbuf);
+ if (fmt) {
+ struct va_format vaf = {
+ .fmt = fmt,
+ .va = &args,
+ };
+
+ printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s (%pV)\n",
+ sb->s_id, function, line, errstr, &vaf);
+ } else {
+ printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
+ sb->s_id, function, line, errstr);
+ }
+
+ /* Don't go through full error handling during mount */
+ if (sb->s_flags & MS_BORN) {
+ save_error_info(fs_info);
+ btrfs_handle_error(fs_info);
+ }
+ va_end(args);
+}
+
+const char *logtypes[] = {
+ "emergency",
+ "alert",
+ "critical",
+ "error",
+ "warning",
+ "notice",
+ "info",
+ "debug",
+};
+
+void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
+{
+ struct super_block *sb = fs_info->sb;
+ char lvl[4];
+ struct va_format vaf;
+ va_list args;
+ const char *type = logtypes[4];
+
+ va_start(args, fmt);
+
+ if (fmt[0] == '<' && isdigit(fmt[1]) && fmt[2] == '>') {
+ strncpy(lvl, fmt, 3);
+ fmt += 3;
+		type = logtypes[lvl[1] - '0'];
+ } else
+ *lvl = '\0';
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk("%sBTRFS %s (device %s): %pV", lvl, type, sb->s_id, &vaf);
+}
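
btrfs_printk() above accepts an optional printk-style "<n>" level prefix and maps the digit to a name in logtypes[], defaulting to logtypes[4] ("warning") when no prefix is given. A standalone sketch of that prefix handling, using vsnprintf() instead of the kernel's %pV; everything here is illustrative only:

#include <ctype.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>

static const char *logtypes[] = {
    "emergency", "alert", "critical", "error",
    "warning", "notice", "info", "debug",
};

/* minimal model: peel an optional "<n>" level prefix off fmt, then print */
static void model_printk(const char *devname, const char *fmt, ...)
{
    char lvl[4] = "";
    const char *type = logtypes[4];     /* default: warning */
    char msg[256];
    va_list args;

    if (fmt[0] == '<' && isdigit((unsigned char)fmt[1]) && fmt[2] == '>') {
        memcpy(lvl, fmt, 3);            /* keep "<n>" for the console */
        type = logtypes[fmt[1] - '0'];
        fmt += 3;
    }

    va_start(args, fmt);
    vsnprintf(msg, sizeof(msg), fmt, args);
    va_end(args);
    printf("%sBTRFS %s (device %s): %s", lvl, type, devname, msg);
}

int main(void)
{
    model_printk("sda1", "<3>csum failed at %llu\n", 123456ULL);
    model_printk("sda1", "no level prefix, defaults to %s\n", "warning");
    return 0;
}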
+
+/*
+ * We only mark the transaction aborted and then set the file system read-only.
+ * This will prevent new transactions from starting or trying to join this
+ * one.
+ *
+ * This means that error recovery at the call site is limited to freeing
+ * any local memory allocations and passing the error code up without
+ * further cleanup. The transaction should complete as it normally would
+ * in the call path but will return -EIO.
+ *
+ * We'll complete the cleanup in btrfs_end_transaction and
+ * btrfs_commit_transaction.
+ */
+void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, const char *function,
+ unsigned int line, int errno)
+{
+ WARN_ONCE(1, KERN_DEBUG "btrfs: Transaction aborted");
+ trans->aborted = errno;
+ /* Nothing used. The other threads that have joined this
+ * transaction may be able to continue. */
+ if (!trans->blocks_used) {
+ btrfs_printk(root->fs_info, "Aborting unused transaction.\n");
return;
+ }
+ trans->transaction->aborted = errno;
+ __btrfs_std_error(root->fs_info, function, line, errno, NULL);
+}
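
The comment and code above describe how an aborted transaction is meant to poison later work: the handle and (if blocks were dirtied) the running transaction record the errno, new joins are refused, and ending the transaction surfaces -EIO. A toy single-threaded model of that propagation, with invented struct names and without any locking:

#include <stdio.h>

/* toy model of how an aborted transaction poisons later operations */
struct model_transaction { int aborted; };  /* the shared running transaction */
struct model_trans_handle {                 /* one join of that transaction */
    struct model_transaction *transaction;
    int aborted;
    int blocks_used;
};

static void abort_transaction(struct model_trans_handle *h, int errno_)
{
    h->aborted = errno_;
    if (!h->blocks_used)        /* nothing dirtied: others may continue */
        return;
    h->transaction->aborted = errno_;
}

static int join_transaction(struct model_transaction *t)
{
    if (t->aborted)
        return t->aborted;      /* no new joiners after an abort */
    return 0;
}

static int end_transaction(struct model_trans_handle *h)
{
    if (h->aborted || h->transaction->aborted)
        return -5;              /* -EIO: error surfaces at end/commit time */
    return 0;
}

int main(void)
{
    struct model_transaction running = { 0 };
    struct model_trans_handle h = { .transaction = &running, .blocks_used = 1 };

    abort_transaction(&h, -28); /* e.g. -ENOSPC deep inside a helper */
    printf("end_transaction -> %d, new joins -> %d\n",
           end_transaction(&h), join_transaction(&running));
    return 0;
}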
+/*
+ * __btrfs_panic decodes unexpected, fatal errors from the caller,
+ * issues an alert, and either panics or BUGs, depending on mount options.
+ */
+void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
+ unsigned int line, int errno, const char *fmt, ...)
+{
+ char nbuf[16];
+ char *s_id = "<unknown>";
+ const char *errstr;
+ struct va_format vaf = { .fmt = fmt };
+ va_list args;
- errstr = btrfs_decode_error(fs_info, errno, nbuf);
- printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
- sb->s_id, function, line, errstr);
- save_error_info(fs_info);
+ if (fs_info)
+ s_id = fs_info->sb->s_id;
- btrfs_handle_error(fs_info);
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ errstr = btrfs_decode_error(fs_info, errno, nbuf);
+ if (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)
+ panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
+ s_id, function, line, &vaf, errstr);
+
+ printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
+ s_id, function, line, &vaf, errstr);
+ va_end(args);
+ /* Caller calls BUG() */
}
static void btrfs_put_super(struct super_block *sb)
@@ -166,7 +278,7 @@ enum {
Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache,
Opt_no_space_cache, Opt_recovery, Opt_skip_balance,
Opt_check_integrity, Opt_check_integrity_including_extent_data,
- Opt_check_integrity_print_mask,
+ Opt_check_integrity_print_mask, Opt_fatal_errors,
Opt_err,
};
@@ -206,12 +318,14 @@ static match_table_t tokens = {
{Opt_check_integrity, "check_int"},
{Opt_check_integrity_including_extent_data, "check_int_data"},
{Opt_check_integrity_print_mask, "check_int_print_mask=%d"},
+ {Opt_fatal_errors, "fatal_errors=%s"},
{Opt_err, NULL},
};
/*
* Regular mount options parser. Everything that is needed only when
* reading in a new superblock is parsed here.
+ * XXX JDM: This needs to be cleaned up for remount.
*/
int btrfs_parse_options(struct btrfs_root *root, char *options)
{
@@ -438,6 +552,18 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
ret = -EINVAL;
goto out;
#endif
+ case Opt_fatal_errors:
+ if (strcmp(args[0].from, "panic") == 0)
+ btrfs_set_opt(info->mount_opt,
+ PANIC_ON_FATAL_ERROR);
+ else if (strcmp(args[0].from, "bug") == 0)
+ btrfs_clear_opt(info->mount_opt,
+ PANIC_ON_FATAL_ERROR);
+ else {
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
case Opt_err:
printk(KERN_INFO "btrfs: unrecognized mount option "
"'%s'\n", p);
@@ -762,6 +888,8 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_puts(seq, ",inode_cache");
if (btrfs_test_opt(root, SKIP_BALANCE))
seq_puts(seq, ",skip_balance");
+ if (btrfs_test_opt(root, PANIC_ON_FATAL_ERROR))
+ seq_puts(seq, ",fatal_errors=panic");
return 0;
}
@@ -995,11 +1123,20 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
{
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *root = fs_info->tree_root;
+ unsigned old_flags = sb->s_flags;
+ unsigned long old_opts = fs_info->mount_opt;
+ unsigned long old_compress_type = fs_info->compress_type;
+ u64 old_max_inline = fs_info->max_inline;
+ u64 old_alloc_start = fs_info->alloc_start;
+ int old_thread_pool_size = fs_info->thread_pool_size;
+ unsigned int old_metadata_ratio = fs_info->metadata_ratio;
int ret;
ret = btrfs_parse_options(root, data);
- if (ret)
- return -EINVAL;
+ if (ret) {
+ ret = -EINVAL;
+ goto restore;
+ }
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
return 0;
@@ -1007,26 +1144,44 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
if (*flags & MS_RDONLY) {
sb->s_flags |= MS_RDONLY;
- ret = btrfs_commit_super(root);
- WARN_ON(ret);
+ ret = btrfs_commit_super(root);
+ if (ret)
+ goto restore;
} else {
-		if (fs_info->fs_devices->rw_devices == 0)
-			return -EACCES;
+		if (fs_info->fs_devices->rw_devices == 0) {
+			ret = -EACCES;
+			goto restore;
+		}
 
-		if (btrfs_super_log_root(fs_info->super_copy) != 0)
-			return -EINVAL;
+		if (btrfs_super_log_root(fs_info->super_copy) != 0) {
+			ret = -EINVAL;
+			goto restore;
+		}
- WARN_ON(ret);
+ if (ret)
+ goto restore;
/* recover relocation */
ret = btrfs_recover_relocation(root);
- WARN_ON(ret);
+ if (ret)
+ goto restore;
sb->s_flags &= ~MS_RDONLY;
}
return 0;
+
+restore:
+ /* We've hit an error - don't reset MS_RDONLY */
+ if (sb->s_flags & MS_RDONLY)
+ old_flags |= MS_RDONLY;
+ sb->s_flags = old_flags;
+ fs_info->mount_opt = old_opts;
+ fs_info->compress_type = old_compress_type;
+ fs_info->max_inline = old_max_inline;
+ fs_info->alloc_start = old_alloc_start;
+ fs_info->thread_pool_size = old_thread_pool_size;
+ fs_info->metadata_ratio = old_metadata_ratio;
+ return ret;
}
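
btrfs_remount() above now snapshots the tunable fs_info fields before parsing new options and rolls all of them back from the restore label on any failure, so a failed remount leaves the previous mount state intact. A stripped-down userspace model of that save/parse/restore shape; the option struct and the pretend parser are invented for the sketch:

#include <stdio.h>
#include <string.h>

struct model_opts {
    unsigned long mount_opt;
    int thread_pool_size;
    unsigned long long max_inline;
};

/* pretend parser: "bad" fails after having already modified opts */
static int parse_options(struct model_opts *o, const char *data)
{
    o->mount_opt |= 0x1;        /* side effect happens before the error */
    if (strcmp(data, "bad") == 0)
        return -1;
    o->thread_pool_size = 8;
    return 0;
}

static int remount(struct model_opts *o, const char *data)
{
    struct model_opts old = *o; /* snapshot everything up front */
    int ret;

    ret = parse_options(o, data);
    if (ret)
        goto restore;
    /* ... further steps would go here, each with its own goto restore ... */
    return 0;

restore:
    *o = old;                   /* roll back to the pre-remount state */
    return ret;
}

int main(void)
{
    struct model_opts opts = { .thread_pool_size = 4 };

    printf("bad remount -> %d, pool size still %d\n",
           remount(&opts, "bad"), opts.thread_pool_size);
    printf("good remount -> %d, pool size now %d\n",
           remount(&opts, "ok"), opts.thread_pool_size);
    return 0;
}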
/* Used to sort the devices by max_avail(descending sort) */
@@ -1356,9 +1511,7 @@ static int __init init_btrfs_fs(void)
if (err)
return err;
- err = btrfs_init_compress();
- if (err)
- goto free_sysfs;
+ btrfs_init_compress();
err = btrfs_init_cachep();
if (err)
@@ -1384,6 +1537,8 @@ static int __init init_btrfs_fs(void)
if (err)
goto unregister_ioctl;
+ btrfs_init_lockdep();
+
printk(KERN_INFO "%s loaded\n", BTRFS_BUILD_VERSION);
return 0;
@@ -1399,7 +1554,6 @@ free_cachep:
btrfs_destroy_cachep();
free_compress:
btrfs_exit_compress();
-free_sysfs:
btrfs_exit_sysfs();
return err;
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 04b77e3ceb7a..8da29e8e4de1 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -31,7 +31,7 @@
#define BTRFS_ROOT_TRANS_TAG 0
-static noinline void put_transaction(struct btrfs_transaction *transaction)
+void put_transaction(struct btrfs_transaction *transaction)
{
WARN_ON(atomic_read(&transaction->use_count) == 0);
if (atomic_dec_and_test(&transaction->use_count)) {
@@ -58,6 +58,12 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)
spin_lock(&root->fs_info->trans_lock);
loop:
+ /* The file system has been taken offline. No new transactions. */
+ if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ spin_unlock(&root->fs_info->trans_lock);
+ return -EROFS;
+ }
+
if (root->fs_info->trans_no_join) {
if (!nofail) {
spin_unlock(&root->fs_info->trans_lock);
@@ -67,6 +73,8 @@ loop:
cur_trans = root->fs_info->running_transaction;
if (cur_trans) {
+ if (cur_trans->aborted)
+ return cur_trans->aborted;
atomic_inc(&cur_trans->use_count);
atomic_inc(&cur_trans->num_writers);
cur_trans->num_joined++;
@@ -123,6 +131,7 @@ loop:
root->fs_info->generation++;
cur_trans->transid = root->fs_info->generation;
root->fs_info->running_transaction = cur_trans;
+ cur_trans->aborted = 0;
spin_unlock(&root->fs_info->trans_lock);
return 0;
@@ -318,6 +327,7 @@ again:
h->use_count = 1;
h->block_rsv = NULL;
h->orig_rsv = NULL;
+ h->aborted = 0;
smp_mb();
if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -327,8 +337,7 @@ again:
if (num_bytes) {
trace_btrfs_space_reservation(root->fs_info, "transaction",
- (u64)(unsigned long)h,
- num_bytes, 1);
+ h->transid, num_bytes, 1);
h->block_rsv = &root->fs_info->trans_block_rsv;
h->bytes_reserved = num_bytes;
}
@@ -440,6 +449,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_block_rsv *rsv = trans->block_rsv;
int updates;
+ int err;
smp_mb();
if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
@@ -453,8 +463,11 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
updates = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
- if (updates)
- btrfs_run_delayed_refs(trans, root, updates);
+ if (updates) {
+ err = btrfs_run_delayed_refs(trans, root, updates);
+		if (err) /* Error code will also evaluate true */
+ return err;
+ }
trans->block_rsv = rsv;
@@ -525,6 +538,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
if (throttle)
btrfs_run_delayed_iputs(root);
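+	/* An aborted transaction or an fs-wide error state is reported back as -EIO. */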
+ if (trans->aborted ||
+ root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ return -EIO;
+ }
+
return 0;
}
@@ -690,11 +708,13 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
ret = btrfs_update_root(trans, tree_root,
&root->root_key,
&root->root_item);
- BUG_ON(ret);
+ if (ret)
+ return ret;
old_root_used = btrfs_root_used(&root->root_item);
ret = btrfs_write_dirty_block_groups(trans, root);
- BUG_ON(ret);
+ if (ret)
+ return ret;
}
if (root != root->fs_info->extent_root)
@@ -705,6 +725,10 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
/*
* update all the cowonly tree roots on disk
+ *
+ * The error handling in this function may not be obvious. Any of the
+ * failures will cause the file system to go offline. We still need
+ * to clean up the delayed refs.
*/
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
@@ -715,22 +739,30 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
int ret;
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
- BUG_ON(ret);
+ if (ret)
+ return ret;
eb = btrfs_lock_root_node(fs_info->tree_root);
- btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
+ ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
+ 0, &eb);
btrfs_tree_unlock(eb);
free_extent_buffer(eb);
+ if (ret)
+ return ret;
+
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
- BUG_ON(ret);
+ if (ret)
+ return ret;
while (!list_empty(&fs_info->dirty_cowonly_roots)) {
next = fs_info->dirty_cowonly_roots.next;
list_del_init(next);
root = list_entry(next, struct btrfs_root, dirty_list);
- update_cowonly_root(trans, root);
+ ret = update_cowonly_root(trans, root);
+ if (ret)
+ return ret;
}
down_write(&fs_info->extent_commit_sem);
@@ -874,7 +906,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
if (!new_root_item) {
- pending->error = -ENOMEM;
+ ret = pending->error = -ENOMEM;
goto fail;
}
@@ -911,21 +943,24 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* insert the directory item
*/
ret = btrfs_set_inode_index(parent_inode, &index);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
ret = btrfs_insert_dir_item(trans, parent_root,
dentry->d_name.name, dentry->d_name.len,
parent_inode, &key,
BTRFS_FT_DIR, index);
- if (ret) {
+ if (ret == -EEXIST) {
pending->error = -EEXIST;
dput(parent);
goto fail;
+ } else if (ret) {
+ goto abort_trans_dput;
}
btrfs_i_size_write(parent_inode, parent_inode->i_size +
dentry->d_name.len * 2);
ret = btrfs_update_inode(trans, parent_root, parent_inode);
- BUG_ON(ret);
+ if (ret)
+ goto abort_trans_dput;
/*
* pull in the delayed directory update
@@ -934,7 +969,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* snapshot
*/
ret = btrfs_run_delayed_items(trans, root);
- BUG_ON(ret);
+ if (ret) { /* Transaction aborted */
+ dput(parent);
+ goto fail;
+ }
record_root_in_trans(trans, root);
btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
@@ -949,12 +987,21 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_set_root_flags(new_root_item, root_flags);
old = btrfs_lock_root_node(root);
- btrfs_cow_block(trans, root, old, NULL, 0, &old);
+ ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
+ if (ret) {
+ btrfs_tree_unlock(old);
+ free_extent_buffer(old);
+ goto abort_trans_dput;
+ }
+
btrfs_set_lock_blocking(old);
- btrfs_copy_root(trans, root, old, &tmp, objectid);
+ ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
+ /* clean up in any case */
btrfs_tree_unlock(old);
free_extent_buffer(old);
+ if (ret)
+ goto abort_trans_dput;
/* see comments in should_cow_block() */
root->force_cow = 1;
@@ -966,7 +1013,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
btrfs_tree_unlock(tmp);
free_extent_buffer(tmp);
- BUG_ON(ret);
+ if (ret)
+ goto abort_trans_dput;
/*
* insert root back/forward references
@@ -975,19 +1023,32 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
parent_root->root_key.objectid,
btrfs_ino(parent_inode), index,
dentry->d_name.name, dentry->d_name.len);
- BUG_ON(ret);
dput(parent);
+ if (ret)
+ goto fail;
key.offset = (u64)-1;
pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
- BUG_ON(IS_ERR(pending->snap));
+ if (IS_ERR(pending->snap)) {
+ ret = PTR_ERR(pending->snap);
+ goto abort_trans;
+ }
- btrfs_reloc_post_snapshot(trans, pending);
+ ret = btrfs_reloc_post_snapshot(trans, pending);
+ if (ret)
+ goto abort_trans;
+ ret = 0;
fail:
kfree(new_root_item);
trans->block_rsv = rsv;
btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
- return 0;
+ return ret;
+
+abort_trans_dput:
+ dput(parent);
+abort_trans:
+ btrfs_abort_transaction(trans, root, ret);
+ goto fail;
}
/*
@@ -1124,6 +1185,33 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
return 0;
}
+
+static void cleanup_transaction(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ struct btrfs_transaction *cur_trans = trans->transaction;
+
+ WARN_ON(trans->use_count > 1);
+
+ spin_lock(&root->fs_info->trans_lock);
+ list_del_init(&cur_trans->list);
+ spin_unlock(&root->fs_info->trans_lock);
+
+ btrfs_cleanup_one_transaction(trans->transaction, root);
+
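+	/*
+	 * Two references to drop: this handle's and the long-lived one taken
+	 * when the transaction was created (normally dropped at commit time).
+	 */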
+ put_transaction(cur_trans);
+ put_transaction(cur_trans);
+
+ trace_btrfs_transaction_commit(root);
+
+ btrfs_scrub_continue(root);
+
+ if (current->journal_info == trans)
+ current->journal_info = NULL;
+
+ kmem_cache_free(btrfs_trans_handle_cachep, trans);
+}
+
/*
* btrfs_transaction state sequence:
* in_commit = 0, blocked = 0 (initial)
@@ -1135,10 +1223,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
unsigned long joined = 0;
- struct btrfs_transaction *cur_trans;
+ struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_transaction *prev_trans = NULL;
DEFINE_WAIT(wait);
- int ret;
+ int ret = -EIO;
int should_grow = 0;
unsigned long now = get_seconds();
int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
@@ -1148,13 +1236,18 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
+ if (cur_trans->aborted)
+ goto cleanup_transaction;
+
/* make a pass through all the delayed refs we have so far
* any runnings procs may add more while we are here
*/
ret = btrfs_run_delayed_refs(trans, root, 0);
- BUG_ON(ret);
+ if (ret)
+ goto cleanup_transaction;
cur_trans = trans->transaction;
+
/*
* set the flushing flag so procs in this transaction have to
* start sending their work down.
@@ -1162,19 +1255,20 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
cur_trans->delayed_refs.flushing = 1;
ret = btrfs_run_delayed_refs(trans, root, 0);
- BUG_ON(ret);
+ if (ret)
+ goto cleanup_transaction;
spin_lock(&cur_trans->commit_lock);
if (cur_trans->in_commit) {
spin_unlock(&cur_trans->commit_lock);
atomic_inc(&cur_trans->use_count);
- btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans, root);
wait_for_commit(root, cur_trans);
put_transaction(cur_trans);
- return 0;
+ return ret;
}
trans->transaction->in_commit = 1;
@@ -1214,12 +1308,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
if (flush_on_commit || snap_pending) {
btrfs_start_delalloc_inodes(root, 1);
- ret = btrfs_wait_ordered_extents(root, 0, 1);
- BUG_ON(ret);
+ btrfs_wait_ordered_extents(root, 0, 1);
}
ret = btrfs_run_delayed_items(trans, root);
- BUG_ON(ret);
+ if (ret)
+ goto cleanup_transaction;
/*
* rename don't use btrfs_join_transaction, so, once we
@@ -1261,13 +1355,22 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
mutex_lock(&root->fs_info->reloc_mutex);
ret = btrfs_run_delayed_items(trans, root);
- BUG_ON(ret);
+ if (ret) {
+ mutex_unlock(&root->fs_info->reloc_mutex);
+ goto cleanup_transaction;
+ }
ret = create_pending_snapshots(trans, root->fs_info);
- BUG_ON(ret);
+ if (ret) {
+ mutex_unlock(&root->fs_info->reloc_mutex);
+ goto cleanup_transaction;
+ }
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
- BUG_ON(ret);
+ if (ret) {
+ mutex_unlock(&root->fs_info->reloc_mutex);
+ goto cleanup_transaction;
+ }
/*
* make sure none of the code above managed to slip in a
@@ -1294,7 +1397,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
mutex_lock(&root->fs_info->tree_log_mutex);
ret = commit_fs_roots(trans, root);
- BUG_ON(ret);
+ if (ret) {
+ mutex_unlock(&root->fs_info->tree_log_mutex);
+ goto cleanup_transaction;
+ }
/* commit_fs_roots gets rid of all the tree log roots, it is now
* safe to free the root of tree log roots
@@ -1302,7 +1408,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_free_log_root_tree(trans, root->fs_info);
ret = commit_cowonly_roots(trans, root);
- BUG_ON(ret);
+ if (ret) {
+ mutex_unlock(&root->fs_info->tree_log_mutex);
+ goto cleanup_transaction;
+ }
btrfs_prepare_extent_commit(trans, root);
@@ -1336,8 +1445,18 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
wake_up(&root->fs_info->transaction_wait);
ret = btrfs_write_and_wait_transaction(trans, root);
- BUG_ON(ret);
- write_ctree_super(trans, root, 0);
+ if (ret) {
+ btrfs_error(root->fs_info, ret,
+ "Error while writing out transaction.");
+ mutex_unlock(&root->fs_info->tree_log_mutex);
+ goto cleanup_transaction;
+ }
+
+ ret = write_ctree_super(trans, root, 0);
+ if (ret) {
+ mutex_unlock(&root->fs_info->tree_log_mutex);
+ goto cleanup_transaction;
+ }
/*
* the super is written, we can safely allow the tree-loggers
@@ -1373,6 +1492,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_run_delayed_iputs(root);
return ret;
+
+cleanup_transaction:
+ btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
+// WARN_ON(1);
+ if (current->journal_info == trans)
+ current->journal_info = NULL;
+ cleanup_transaction(trans, root);
+
+ return ret;
}
/*
@@ -1388,6 +1516,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
spin_unlock(&fs_info->trans_lock);
while (!list_empty(&list)) {
+ int ret;
+
root = list_entry(list.next, struct btrfs_root, root_list);
list_del(&root->root_list);
@@ -1395,9 +1525,10 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
if (btrfs_header_backref_rev(root->node) <
BTRFS_MIXED_BACKREF_REV)
- btrfs_drop_snapshot(root, NULL, 0, 0);
+ ret = btrfs_drop_snapshot(root, NULL, 0, 0);
else
- btrfs_drop_snapshot(root, NULL, 1, 0);
+			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
+ BUG_ON(ret < 0);
}
return 0;
}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 02564e6230ac..fe27379e368b 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -43,6 +43,7 @@ struct btrfs_transaction {
wait_queue_head_t commit_wait;
struct list_head pending_snapshots;
struct btrfs_delayed_ref_root delayed_refs;
+ int aborted;
};
struct btrfs_trans_handle {
@@ -55,6 +56,7 @@ struct btrfs_trans_handle {
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
+ int aborted;
};
struct btrfs_pending_snapshot {
@@ -114,4 +116,5 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
+void put_transaction(struct btrfs_transaction *transaction);
#endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 966cc74f5d6c..d017283ae6f5 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -212,14 +212,13 @@ int btrfs_pin_log_trans(struct btrfs_root *root)
* indicate we're done making changes to the log tree
* and wake up anyone waiting to do a sync
*/
-int btrfs_end_log_trans(struct btrfs_root *root)
+void btrfs_end_log_trans(struct btrfs_root *root)
{
if (atomic_dec_and_test(&root->log_writers)) {
smp_mb();
if (waitqueue_active(&root->log_writer_wait))
wake_up(&root->log_writer_wait);
}
- return 0;
}
@@ -378,12 +377,11 @@ insert:
u32 found_size;
found_size = btrfs_item_size_nr(path->nodes[0],
path->slots[0]);
- if (found_size > item_size) {
+ if (found_size > item_size)
btrfs_truncate_item(trans, root, path, item_size, 1);
- } else if (found_size < item_size) {
- ret = btrfs_extend_item(trans, root, path,
- item_size - found_size);
- }
+ else if (found_size < item_size)
+ btrfs_extend_item(trans, root, path,
+ item_size - found_size);
} else if (ret) {
return ret;
}
@@ -1763,7 +1761,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(root,
bytenr, blocksize);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM or logic errors */
}
free_extent_buffer(next);
continue;
@@ -1871,20 +1869,26 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
wret = walk_down_log_tree(trans, log, path, &level, wc);
if (wret > 0)
break;
- if (wret < 0)
+ if (wret < 0) {
ret = wret;
+ goto out;
+ }
wret = walk_up_log_tree(trans, log, path, &level, wc);
if (wret > 0)
break;
- if (wret < 0)
+ if (wret < 0) {
ret = wret;
+ goto out;
+ }
}
/* was the root node processed? if not, catch it here */
if (path->nodes[orig_level]) {
- wc->process_func(log, path->nodes[orig_level], wc,
+ ret = wc->process_func(log, path->nodes[orig_level], wc,
btrfs_header_generation(path->nodes[orig_level]));
+ if (ret)
+ goto out;
if (wc->free) {
struct extent_buffer *next;
@@ -1900,10 +1904,11 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(log, next->start,
next->len);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM or logic errors */
}
}
+out:
for (i = 0; i <= orig_level; i++) {
if (path->nodes[i]) {
free_extent_buffer(path->nodes[i]);
@@ -1963,8 +1968,8 @@ static int wait_log_commit(struct btrfs_trans_handle *trans,
return 0;
}
-static int wait_for_writer(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+static void wait_for_writer(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
{
DEFINE_WAIT(wait);
while (root->fs_info->last_trans_log_full_commit !=
@@ -1978,7 +1983,6 @@ static int wait_for_writer(struct btrfs_trans_handle *trans,
mutex_lock(&root->log_mutex);
finish_wait(&root->log_writer_wait, &wait);
}
- return 0;
}
/*
@@ -2046,7 +2050,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* wait for them until later.
*/
ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ mutex_unlock(&root->log_mutex);
+ goto out;
+ }
btrfs_set_root_node(&log->root_item, log->node);
@@ -2077,7 +2085,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
}
if (ret) {
- BUG_ON(ret != -ENOSPC);
+ if (ret != -ENOSPC) {
+ btrfs_abort_transaction(trans, root, ret);
+ mutex_unlock(&log_root_tree->log_mutex);
+ goto out;
+ }
root->fs_info->last_trans_log_full_commit = trans->transid;
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
mutex_unlock(&log_root_tree->log_mutex);
@@ -2117,7 +2129,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = btrfs_write_and_wait_marked_extents(log_root_tree,
&log_root_tree->dirty_log_pages,
EXTENT_DIRTY | EXTENT_NEW);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ mutex_unlock(&log_root_tree->log_mutex);
+ goto out_wake_log_root;
+ }
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
btrfs_set_super_log_root(root->fs_info->super_for_commit,
@@ -2326,7 +2342,9 @@ out_unlock:
if (ret == -ENOSPC) {
root->fs_info->last_trans_log_full_commit = trans->transid;
ret = 0;
- }
+ } else if (ret < 0)
+ btrfs_abort_transaction(trans, root, ret);
+
btrfs_end_log_trans(root);
return err;
@@ -2357,7 +2375,8 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
if (ret == -ENOSPC) {
root->fs_info->last_trans_log_full_commit = trans->transid;
ret = 0;
- }
+ } else if (ret < 0 && ret != -ENOENT)
+ btrfs_abort_transaction(trans, root, ret);
btrfs_end_log_trans(root);
return ret;
@@ -3169,13 +3188,20 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
fs_info->log_root_recovering = 1;
trans = btrfs_start_transaction(fs_info->tree_root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto error;
+ }
wc.trans = trans;
wc.pin = 1;
ret = walk_log_tree(trans, log_root_tree, &wc);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_error(fs_info, ret, "Failed to pin buffers while "
+ "recovering log root tree.");
+ goto error;
+ }
again:
key.objectid = BTRFS_TREE_LOG_OBJECTID;
@@ -3184,8 +3210,12 @@ again:
while (1) {
ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
- if (ret < 0)
- break;
+
+ if (ret < 0) {
+ btrfs_error(fs_info, ret,
+ "Couldn't find tree log root.");
+ goto error;
+ }
if (ret > 0) {
if (path->slots[0] == 0)
break;
@@ -3199,14 +3229,24 @@ again:
log = btrfs_read_fs_root_no_radix(log_root_tree,
&found_key);
- BUG_ON(IS_ERR(log));
+ if (IS_ERR(log)) {
+ ret = PTR_ERR(log);
+ btrfs_error(fs_info, ret,
+ "Couldn't read tree log root.");
+ goto error;
+ }
tmp_key.objectid = found_key.offset;
tmp_key.type = BTRFS_ROOT_ITEM_KEY;
tmp_key.offset = (u64)-1;
wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
- BUG_ON(IS_ERR_OR_NULL(wc.replay_dest));
+ if (IS_ERR(wc.replay_dest)) {
+ ret = PTR_ERR(wc.replay_dest);
+ btrfs_error(fs_info, ret, "Couldn't read target root "
+ "for tree log recovery.");
+ goto error;
+ }
wc.replay_dest->log_root = log;
btrfs_record_root_in_trans(trans, wc.replay_dest);
@@ -3254,6 +3294,10 @@ again:
kfree(log_root_tree);
return 0;
+
+error:
+ btrfs_free_path(path);
+ return ret;
}
/*
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 2270ac58d746..862ac813f6b8 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -38,7 +38,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
struct inode *inode, u64 dirid);
-int btrfs_end_log_trans(struct btrfs_root *root);
+void btrfs_end_log_trans(struct btrfs_root *root);
int btrfs_pin_log_trans(struct btrfs_root *root);
int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index ef41f285a475..a872b48be0ae 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -67,7 +67,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
kfree(fs_devices);
}
-int btrfs_cleanup_fs_uuids(void)
+void btrfs_cleanup_fs_uuids(void)
{
struct btrfs_fs_devices *fs_devices;
@@ -77,7 +77,6 @@ int btrfs_cleanup_fs_uuids(void)
list_del(&fs_devices->list);
free_fs_devices(fs_devices);
}
- return 0;
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
@@ -130,7 +129,7 @@ static void requeue_list(struct btrfs_pending_bios *pending_bios,
* the list if the block device is congested. This way, multiple devices
* can make progress from a single worker thread.
*/
-static noinline int run_scheduled_bios(struct btrfs_device *device)
+static noinline void run_scheduled_bios(struct btrfs_device *device)
{
struct bio *pending;
struct backing_dev_info *bdi;
@@ -316,7 +315,6 @@ loop_lock:
done:
blk_finish_plug(&plug);
- return 0;
}
static void pending_bios_fn(struct btrfs_work *work)
@@ -455,7 +453,7 @@ error:
return ERR_PTR(-ENOMEM);
}
-int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
+void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device, *next;
@@ -503,7 +501,6 @@ again:
fs_devices->latest_trans = latest_transid;
mutex_unlock(&uuid_mutex);
- return 0;
}
static void __free_device(struct work_struct *work)
@@ -552,10 +549,10 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
fs_devices->num_can_discard--;
new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
- BUG_ON(!new_device);
+ BUG_ON(!new_device); /* -ENOMEM */
memcpy(new_device, device, sizeof(*new_device));
new_device->name = kstrdup(device->name, GFP_NOFS);
- BUG_ON(device->name && !new_device->name);
+ BUG_ON(device->name && !new_device->name); /* -ENOMEM */
new_device->bdev = NULL;
new_device->writeable = 0;
new_device->in_fs_metadata = 0;
@@ -625,6 +622,8 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
printk(KERN_INFO "open %s failed\n", device->name);
goto error;
}
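+		/*
+		 * Write back and drop cached pages so the super block below
+		 * is read from the device, not from a stale cache.
+		 */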
+ filemap_write_and_wait(bdev->bd_inode->i_mapping);
+ invalidate_bdev(bdev);
set_blocksize(bdev, 4096);
bh = btrfs_read_dev_super(bdev);
@@ -1039,8 +1038,10 @@ again:
leaf = path->nodes[0];
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_dev_extent);
+ } else {
+ btrfs_error(root->fs_info, ret, "Slot search failed");
+ goto out;
}
- BUG_ON(ret);
if (device->bytes_used > 0) {
u64 len = btrfs_dev_extent_length(leaf, extent);
@@ -1050,7 +1051,10 @@ again:
spin_unlock(&root->fs_info->free_chunk_lock);
}
ret = btrfs_del_item(trans, root, path);
-
+ if (ret) {
+ btrfs_error(root->fs_info, ret,
+ "Failed to remove dev extent item");
+ }
out:
btrfs_free_path(path);
return ret;
@@ -1078,7 +1082,8 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
key.type = BTRFS_DEV_EXTENT_KEY;
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(*extent));
- BUG_ON(ret);
+ if (ret)
+ goto out;
leaf = path->nodes[0];
extent = btrfs_item_ptr(leaf, path->slots[0],
@@ -1093,6 +1098,7 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
btrfs_set_dev_extent_length(leaf, extent, num_bytes);
btrfs_mark_buffer_dirty(leaf);
+out:
btrfs_free_path(path);
return ret;
}
@@ -1118,7 +1124,7 @@ static noinline int find_next_chunk(struct btrfs_root *root,
if (ret < 0)
goto error;
- BUG_ON(ret == 0);
+ BUG_ON(ret == 0); /* Corruption */
ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
if (ret) {
@@ -1162,7 +1168,7 @@ static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
if (ret < 0)
goto error;
- BUG_ON(ret == 0);
+ BUG_ON(ret == 0); /* Corruption */
ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
BTRFS_DEV_ITEM_KEY);
@@ -1350,6 +1356,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
}
set_blocksize(bdev, 4096);
+ invalidate_bdev(bdev);
bh = btrfs_read_dev_super(bdev);
if (!bh) {
ret = -EINVAL;
@@ -1596,7 +1603,7 @@ next_slot:
(unsigned long)btrfs_device_fsid(dev_item),
BTRFS_UUID_SIZE);
device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
- BUG_ON(!device);
+ BUG_ON(!device); /* Logic error */
if (device->fs_devices->seeding) {
btrfs_set_device_generation(leaf, dev_item,
@@ -1706,7 +1713,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
if (seeding_dev) {
sb->s_flags &= ~MS_RDONLY;
ret = btrfs_prepare_sprout(root);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
device->fs_devices = root->fs_info->fs_devices;
@@ -1744,11 +1751,15 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
if (seeding_dev) {
ret = init_first_rw_device(trans, root, device);
- BUG_ON(ret);
+ if (ret)
+ goto error_trans;
ret = btrfs_finish_sprout(trans, root);
- BUG_ON(ret);
+ if (ret)
+ goto error_trans;
} else {
ret = btrfs_add_device(trans, root, device);
+ if (ret)
+ goto error_trans;
}
/*
@@ -1758,17 +1769,31 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
btrfs_clear_space_info_full(root->fs_info);
unlock_chunks(root);
- btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans, root);
if (seeding_dev) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
+ if (ret) /* transaction commit */
+ return ret;
+
ret = btrfs_relocate_sys_chunks(root);
- BUG_ON(ret);
+ if (ret < 0)
+ btrfs_error(root->fs_info, ret,
+ "Failed to relocate sys chunks after "
+ "device initialization. This can be fixed "
+ "using the \"btrfs balance\" command.");
}
return ret;
+
+error_trans:
+ unlock_chunks(root);
+ btrfs_abort_transaction(trans, root, ret);
+ btrfs_end_transaction(trans, root);
+ kfree(device->name);
+ kfree(device);
error:
blkdev_put(bdev, FMODE_EXCL);
if (seeding_dev) {
@@ -1876,10 +1901,20 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
key.type = BTRFS_CHUNK_ITEM_KEY;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- BUG_ON(ret);
+ if (ret < 0)
+ goto out;
+ else if (ret > 0) { /* Logic error or corruption */
+ btrfs_error(root->fs_info, -ENOENT,
+ "Failed lookup while freeing chunk.");
+ ret = -ENOENT;
+ goto out;
+ }
ret = btrfs_del_item(trans, root, path);
-
+ if (ret < 0)
+ btrfs_error(root->fs_info, ret,
+ "Failed to delete chunk item.");
+out:
btrfs_free_path(path);
return ret;
}
@@ -2041,7 +2076,7 @@ again:
ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
if (ret < 0)
goto error;
- BUG_ON(ret == 0);
+ BUG_ON(ret == 0); /* Corruption */
ret = btrfs_previous_item(chunk_root, path, key.objectid,
key.type);
@@ -2250,15 +2285,13 @@ static void unset_balance_control(struct btrfs_fs_info *fs_info)
* Balance filters. Return 1 if chunk should be filtered out
* (should not be balanced).
*/
-static int chunk_profiles_filter(u64 chunk_profile,
+static int chunk_profiles_filter(u64 chunk_type,
struct btrfs_balance_args *bargs)
{
- chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
- if (chunk_profile == 0)
- chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+ chunk_type = chunk_to_extended(chunk_type) &
+ BTRFS_EXTENDED_PROFILE_MASK;
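+	/* chunk_to_extended() maps a zero profile to the "single" bit. */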
- if (bargs->profiles & chunk_profile)
+ if (bargs->profiles & chunk_type)
return 0;
return 1;
@@ -2365,18 +2398,16 @@ static int chunk_vrange_filter(struct extent_buffer *leaf,
return 1;
}
-static int chunk_soft_convert_filter(u64 chunk_profile,
+static int chunk_soft_convert_filter(u64 chunk_type,
struct btrfs_balance_args *bargs)
{
if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
return 0;
- chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
+ chunk_type = chunk_to_extended(chunk_type) &
+ BTRFS_EXTENDED_PROFILE_MASK;
- if (chunk_profile == 0)
- chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-
- if (bargs->target & chunk_profile)
+ if (bargs->target == chunk_type)
return 1;
return 0;
@@ -2602,6 +2633,30 @@ error:
return ret;
}
+/**
+ * alloc_profile_is_valid - see if a given profile is valid and reduced
+ * @flags: profile to validate
+ * @extended: if true @flags is treated as an extended profile
+ */
+static int alloc_profile_is_valid(u64 flags, int extended)
+{
+ u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
+ BTRFS_BLOCK_GROUP_PROFILE_MASK);
+
+ flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
+
+ /* 1) check that all other bits are zeroed */
+ if (flags & ~mask)
+ return 0;
+
+ /* 2) see if profile is reduced */
+ if (flags == 0)
+ return !extended; /* "0" is valid for usual profiles */
+
+ /* true if exactly one bit set */
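+	/* e.g. a lone RAID1 bit passes; RAID0 | RAID1 together does not */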
+ return (flags & (flags - 1)) == 0;
+}
+
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
/* cancel requested || normal exit path */
@@ -2630,6 +2685,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
{
struct btrfs_fs_info *fs_info = bctl->fs_info;
u64 allowed;
+ int mixed = 0;
int ret;
if (btrfs_fs_closing(fs_info) ||
@@ -2639,13 +2695,16 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
goto out;
}
+ allowed = btrfs_super_incompat_flags(fs_info->super_copy);
+ if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+ mixed = 1;
+
/*
* In case of mixed groups both data and meta should be picked,
* and identical options should be given for both of them.
*/
- allowed = btrfs_super_incompat_flags(fs_info->super_copy);
- if ((allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
- (bctl->flags & (BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA))) {
+ allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
+ if (mixed && (bctl->flags & allowed)) {
if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
!(bctl->flags & BTRFS_BALANCE_METADATA) ||
memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
@@ -2656,14 +2715,6 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
}
}
- /*
- * Profile changing sanity checks. Skip them if a simple
- * balance is requested.
- */
- if (!((bctl->data.flags | bctl->sys.flags | bctl->meta.flags) &
- BTRFS_BALANCE_ARGS_CONVERT))
- goto do_balance;
-
allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
if (fs_info->fs_devices->num_devices == 1)
allowed |= BTRFS_BLOCK_GROUP_DUP;
@@ -2673,24 +2724,27 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10);
- if (!profile_is_valid(bctl->data.target, 1) ||
- bctl->data.target & ~allowed) {
+ if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+ (!alloc_profile_is_valid(bctl->data.target, 1) ||
+ (bctl->data.target & ~allowed))) {
printk(KERN_ERR "btrfs: unable to start balance with target "
"data profile %llu\n",
(unsigned long long)bctl->data.target);
ret = -EINVAL;
goto out;
}
- if (!profile_is_valid(bctl->meta.target, 1) ||
- bctl->meta.target & ~allowed) {
+ if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+ (!alloc_profile_is_valid(bctl->meta.target, 1) ||
+ (bctl->meta.target & ~allowed))) {
printk(KERN_ERR "btrfs: unable to start balance with target "
"metadata profile %llu\n",
(unsigned long long)bctl->meta.target);
ret = -EINVAL;
goto out;
}
- if (!profile_is_valid(bctl->sys.target, 1) ||
- bctl->sys.target & ~allowed) {
+ if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+ (!alloc_profile_is_valid(bctl->sys.target, 1) ||
+ (bctl->sys.target & ~allowed))) {
printk(KERN_ERR "btrfs: unable to start balance with target "
"system profile %llu\n",
(unsigned long long)bctl->sys.target);
@@ -2698,7 +2752,9 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
goto out;
}
- if (bctl->data.target & BTRFS_BLOCK_GROUP_DUP) {
+ /* allow dup'ed data chunks only in mixed mode */
+ if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+ (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
printk(KERN_ERR "btrfs: dup for data is not allowed\n");
ret = -EINVAL;
goto out;
@@ -2724,7 +2780,6 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
}
}
-do_balance:
ret = insert_balance_item(fs_info->tree_root, bctl);
if (ret && ret != -EEXIST)
goto out;
@@ -2967,7 +3022,7 @@ again:
key.offset = (u64)-1;
key.type = BTRFS_DEV_EXTENT_KEY;
- while (1) {
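+	/*
+	 * Walk dev extents from the highest offset down; the loop condition
+	 * stops at key.offset == 0 so the u64 never wraps.
+	 */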
+ do {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto done;
@@ -3009,8 +3064,7 @@ again:
goto done;
if (ret == -ENOSPC)
failed++;
- key.offset -= 1;
- }
+ } while (key.offset-- > 0);
if (failed && !retried) {
failed = 0;
@@ -3128,11 +3182,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
int i;
int j;
- if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
- (type & BTRFS_BLOCK_GROUP_DUP)) {
- WARN_ON(1);
- type &= ~BTRFS_BLOCK_GROUP_DUP;
- }
+ BUG_ON(!alloc_profile_is_valid(type, 0));
if (list_empty(&fs_devices->alloc_list))
return -ENOSPC;
@@ -3328,13 +3378,15 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
- BUG_ON(ret);
free_extent_map(em);
+ if (ret)
+ goto error;
ret = btrfs_make_block_group(trans, extent_root, 0, type,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
start, num_bytes);
- BUG_ON(ret);
+ if (ret)
+ goto error;
for (i = 0; i < map->num_stripes; ++i) {
struct btrfs_device *device;
@@ -3347,7 +3399,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
info->chunk_root->root_key.objectid,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
start, dev_offset, stripe_size);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto error;
+ }
}
kfree(devices_info);
@@ -3383,7 +3438,8 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
device = map->stripes[index].dev;
device->bytes_used += stripe_size;
ret = btrfs_update_device(trans, device);
- BUG_ON(ret);
+ if (ret)
+ goto out_free;
index++;
}
@@ -3420,16 +3476,19 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
key.offset = chunk_offset;
ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
- BUG_ON(ret);
- if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
+ if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
+ /*
+ * TODO: Cleanup of inserted chunk root in case of
+ * failure.
+ */
ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
item_size);
- BUG_ON(ret);
}
+out_free:
kfree(chunk);
- return 0;
+ return ret;
}
/*
@@ -3461,7 +3520,8 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
chunk_size, stripe_size);
- BUG_ON(ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -3493,7 +3553,8 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
&stripe_size, chunk_offset, alloc_profile);
- BUG_ON(ret);
+ if (ret)
+ return ret;
sys_chunk_offset = chunk_offset + chunk_size;
@@ -3504,10 +3565,12 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
&sys_chunk_size, &sys_stripe_size,
sys_chunk_offset, alloc_profile);
- BUG_ON(ret);
+ if (ret)
+ goto abort;
ret = btrfs_add_device(trans, fs_info->chunk_root, device);
- BUG_ON(ret);
+ if (ret)
+ goto abort;
/*
* Modifying chunk tree needs allocating new blocks from both
@@ -3517,13 +3580,20 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
*/
ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
chunk_size, stripe_size);
- BUG_ON(ret);
+ if (ret)
+ goto abort;
ret = __finish_chunk_alloc(trans, extent_root, sys_map,
sys_chunk_offset, sys_chunk_size,
sys_stripe_size);
- BUG_ON(ret);
+ if (ret)
+ goto abort;
+
return 0;
+
+abort:
+ btrfs_abort_transaction(trans, root, ret);
+ return ret;
}
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
@@ -3874,7 +3944,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
do_div(length, map->num_stripes);
buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
- BUG_ON(!buf);
+ BUG_ON(!buf); /* -ENOMEM */
for (i = 0; i < map->num_stripes; i++) {
if (devid && map->stripes[i].dev->devid != devid)
@@ -3967,7 +4037,7 @@ struct async_sched {
* This will add one bio to the pending list for a device and make sure
* the work struct is scheduled.
*/
-static noinline int schedule_bio(struct btrfs_root *root,
+static noinline void schedule_bio(struct btrfs_root *root,
struct btrfs_device *device,
int rw, struct bio *bio)
{
@@ -3979,7 +4049,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
bio_get(bio);
btrfsic_submit_bio(rw, bio);
bio_put(bio);
- return 0;
+ return;
}
/*
@@ -4013,7 +4083,6 @@ static noinline int schedule_bio(struct btrfs_root *root,
if (should_queue)
btrfs_queue_worker(&root->fs_info->submit_workers,
&device->work);
- return 0;
}
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
@@ -4036,7 +4105,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
mirror_num);
- BUG_ON(ret);
+ if (ret) /* -ENOMEM */
+ return ret;
total_devs = bbio->num_stripes;
if (map_length < length) {
@@ -4055,7 +4125,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
while (dev_nr < total_devs) {
if (dev_nr < total_devs - 1) {
bio = bio_clone(first_bio, GFP_NOFS);
- BUG_ON(!bio);
+ BUG_ON(!bio); /* -ENOMEM */
} else {
bio = first_bio;
}
@@ -4209,13 +4279,13 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
write_lock(&map_tree->map_tree.lock);
ret = add_extent_mapping(&map_tree->map_tree, em);
write_unlock(&map_tree->map_tree.lock);
- BUG_ON(ret);
+ BUG_ON(ret); /* Tree corruption */
free_extent_map(em);
return 0;
}
-static int fill_device_from_item(struct extent_buffer *leaf,
+static void fill_device_from_item(struct extent_buffer *leaf,
struct btrfs_dev_item *dev_item,
struct btrfs_device *device)
{
@@ -4232,8 +4302,6 @@ static int fill_device_from_item(struct extent_buffer *leaf,
ptr = (unsigned long)btrfs_device_uuid(dev_item);
read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
-
- return 0;
}
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
@@ -4384,7 +4452,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
* to silence the warning eg. on PowerPC 64.
*/
if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
- SetPageUptodate(sb->first_page);
+ SetPageUptodate(sb->pages[0]);
write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
array_size = btrfs_super_sys_array_size(super_copy);
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 19ac95048b88..bb6b03f97aaa 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -260,12 +260,12 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
struct btrfs_fs_devices **fs_devices_ret);
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
-int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices);
+void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices);
int btrfs_add_device(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_root *root, char *device_path);
-int btrfs_cleanup_fs_uuids(void);
+void btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 new_size);
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 573b899b5a5d..270464629416 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -58,15 +58,16 @@ cifs_dump_mem(char *label, void *data, int length)
}
#ifdef CONFIG_CIFS_DEBUG2
-void cifs_dump_detail(struct smb_hdr *smb)
+void cifs_dump_detail(void *buf)
{
+ struct smb_hdr *smb = (struct smb_hdr *)buf;
+
cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
smb->Command, smb->Status.CifsError,
smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
cERROR(1, "smb buf %p len %d", smb, smbCalcSize(smb));
}
-
void cifs_dump_mids(struct TCP_Server_Info *server)
{
struct list_head *tmp;
@@ -79,15 +80,15 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
spin_lock(&GlobalMid_Lock);
list_for_each(tmp, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %d",
- mid_entry->midState,
- (int)mid_entry->command,
+ cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu",
+ mid_entry->mid_state,
+ le16_to_cpu(mid_entry->command),
mid_entry->pid,
mid_entry->callback_data,
mid_entry->mid);
#ifdef CONFIG_CIFS_STATS2
cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld",
- mid_entry->largeBuf,
+ mid_entry->large_buf,
mid_entry->resp_buf,
mid_entry->when_received,
jiffies);
@@ -217,12 +218,12 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
mid_entry = list_entry(tmp3, struct mid_q_entry,
qhead);
seq_printf(m, "\tState: %d com: %d pid:"
- " %d cbdata: %p mid %d\n",
- mid_entry->midState,
- (int)mid_entry->command,
- mid_entry->pid,
- mid_entry->callback_data,
- mid_entry->mid);
+ " %d cbdata: %p mid %llu\n",
+ mid_entry->mid_state,
+ le16_to_cpu(mid_entry->command),
+ mid_entry->pid,
+ mid_entry->callback_data,
+ mid_entry->mid);
}
spin_unlock(&GlobalMid_Lock);
}
@@ -417,7 +418,6 @@ static const struct file_operations cifs_stats_proc_fops = {
static struct proc_dir_entry *proc_fs_cifs;
static const struct file_operations cifsFYI_proc_fops;
-static const struct file_operations cifs_oplock_proc_fops;
static const struct file_operations cifs_lookup_cache_proc_fops;
static const struct file_operations traceSMB_proc_fops;
static const struct file_operations cifs_multiuser_mount_proc_fops;
@@ -438,7 +438,6 @@ cifs_proc_init(void)
#endif /* STATS */
proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops);
proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
- proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops);
proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
&cifs_linux_ext_proc_fops);
proc_create("MultiuserMount", 0, proc_fs_cifs,
@@ -462,7 +461,6 @@ cifs_proc_clean(void)
remove_proc_entry("Stats", proc_fs_cifs);
#endif
remove_proc_entry("MultiuserMount", proc_fs_cifs);
- remove_proc_entry("OplockEnabled", proc_fs_cifs);
remove_proc_entry("SecurityFlags", proc_fs_cifs);
remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
@@ -508,46 +506,6 @@ static const struct file_operations cifsFYI_proc_fops = {
.write = cifsFYI_proc_write,
};
-static int cifs_oplock_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%d\n", enable_oplocks);
- return 0;
-}
-
-static int cifs_oplock_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cifs_oplock_proc_show, NULL);
-}
-
-static ssize_t cifs_oplock_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *ppos)
-{
- char c;
- int rc;
-
- printk(KERN_WARNING "CIFS: The /proc/fs/cifs/OplockEnabled interface "
- "will be removed in kernel version 3.4. Please migrate to "
- "using the 'enable_oplocks' module parameter in cifs.ko.\n");
- rc = get_user(c, buffer);
- if (rc)
- return rc;
- if (c == '0' || c == 'n' || c == 'N')
- enable_oplocks = false;
- else if (c == '1' || c == 'y' || c == 'Y')
- enable_oplocks = true;
-
- return count;
-}
-
-static const struct file_operations cifs_oplock_proc_fops = {
- .owner = THIS_MODULE,
- .open = cifs_oplock_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cifs_oplock_proc_write,
-};
-
static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%d\n", linuxExtEnabled);
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 8942b28cf807..566e0ae8dc2c 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -26,13 +26,13 @@
void cifs_dump_mem(char *label, void *data, int length);
#ifdef CONFIG_CIFS_DEBUG2
#define DBG2 2
-void cifs_dump_detail(struct smb_hdr *);
+void cifs_dump_detail(void *);
void cifs_dump_mids(struct TCP_Server_Info *);
#else
#define DBG2 0
#endif
extern int traceSMB; /* flag which enables the function below */
-void dump_smb(struct smb_hdr *, int);
+void dump_smb(void *, int);
#define CIFS_INFO 0x01
#define CIFS_RC 0x02
#define CIFS_TIMER 0x04
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index eee522c56ef0..d34212822444 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -85,6 +85,8 @@ extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
+struct workqueue_struct *cifsiod_wq;
+
static int
cifs_read_super(struct super_block *sb)
{
@@ -1111,9 +1113,15 @@ init_cifs(void)
cFYI(1, "cifs_max_pending set to max of %u", CIFS_MAX_REQ);
}
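+	/*
+	 * Per-module workqueue for deferred cifs work, used in place of
+	 * system_nrt_wq; WQ_MEM_RECLAIM provides a rescuer so queued I/O
+	 * completion work can make progress during memory reclaim.
+	 */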
+ cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ if (!cifsiod_wq) {
+ rc = -ENOMEM;
+ goto out_clean_proc;
+ }
+
rc = cifs_fscache_register();
if (rc)
- goto out_clean_proc;
+ goto out_destroy_wq;
rc = cifs_init_inodecache();
if (rc)
@@ -1161,6 +1169,8 @@ out_destroy_inodecache:
cifs_destroy_inodecache();
out_unreg_fscache:
cifs_fscache_unregister();
+out_destroy_wq:
+ destroy_workqueue(cifsiod_wq);
out_clean_proc:
cifs_proc_clean();
return rc;
@@ -1183,6 +1193,7 @@ exit_cifs(void)
cifs_destroy_mids();
cifs_destroy_inodecache();
cifs_fscache_unregister();
+ destroy_workqueue(cifsiod_wq);
cifs_proc_clean();
}
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index fe5ecf1b422a..d1389bb33ceb 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -125,5 +125,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "1.76"
+#define CIFS_VERSION "1.77"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 339ebe3ebc0d..4ff6313f0a91 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -230,6 +230,12 @@ struct cifs_mnt_data {
int flags;
};
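+/* RFC 1002 length field: the first four bytes of the frame, big-endian. */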
+static inline unsigned int
+get_rfc1002_length(void *buf)
+{
+ return be32_to_cpu(*((__be32 *)buf));
+}
+
struct TCP_Server_Info {
struct list_head tcp_ses_list;
struct list_head smb_ses_list;
@@ -276,7 +282,7 @@ struct TCP_Server_Info {
vcnumbers */
int capabilities; /* allow selective disabling of caps by smb sess */
int timeAdj; /* Adjust for difference in server time zone in sec */
- __u16 CurrentMid; /* multiplex id - rotating counter */
+ __u64 CurrentMid; /* multiplex id - rotating counter */
char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */
/* 16th byte of RFC1001 workstation name is always null */
char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
@@ -335,6 +341,18 @@ has_credits(struct TCP_Server_Info *server, int *credits)
return num > 0;
}
+static inline size_t
+header_size(void)
+{
+ return sizeof(struct smb_hdr);
+}
+
+static inline size_t
+max_header_size(void)
+{
+ return MAX_CIFS_HDR_SIZE;
+}
+
/*
* Macros to allow the TCP_Server_Info->net field and related code to drop out
* when CONFIG_NET_NS isn't set.
@@ -583,9 +601,11 @@ struct cifs_io_parms {
* Take a reference on the file private data. Must be called with
* cifs_file_list_lock held.
*/
-static inline void cifsFileInfo_get(struct cifsFileInfo *cifs_file)
+static inline
+struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
++cifs_file->count;
+ return cifs_file;
}
void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
@@ -606,7 +626,7 @@ struct cifsInodeInfo {
bool delete_pending; /* DELETE_ON_CLOSE is set */
bool invalid_mapping; /* pagecache is invalid */
unsigned long time; /* jiffies of last update of inode */
- u64 server_eof; /* current file size on server */
+ u64 server_eof; /* current file size on server -- protected by i_lock */
u64 uniqueid; /* server inode number */
u64 createtime; /* creation time on server */
#ifdef CONFIG_CIFS_FSCACHE
@@ -713,8 +733,8 @@ typedef void (mid_callback_t)(struct mid_q_entry *mid);
/* one of these for every pending CIFS request to the server */
struct mid_q_entry {
struct list_head qhead; /* mids waiting on reply from this server */
- __u16 mid; /* multiplex id */
- __u16 pid; /* process id */
+ __u64 mid; /* multiplex id */
+ __u32 pid; /* process id */
__u32 sequence_number; /* for CIFS signing */
unsigned long when_alloc; /* when mid was created */
#ifdef CONFIG_CIFS_STATS2
@@ -724,10 +744,10 @@ struct mid_q_entry {
mid_receive_t *receive; /* call receive callback */
mid_callback_t *callback; /* call completion callback */
void *callback_data; /* general purpose pointer for callback */
- struct smb_hdr *resp_buf; /* pointer to received SMB header */
- int midState; /* wish this were enum but can not pass to wait_event */
- __u8 command; /* smb command code */
- bool largeBuf:1; /* if valid response, is pointer to large buf */
+ void *resp_buf; /* pointer to received SMB header */
+ int mid_state; /* wish this were enum but can not pass to wait_event */
+ __le16 command; /* smb command code */
+ bool large_buf:1; /* if valid response, is pointer to large buf */
bool multiRsp:1; /* multiple trans2 responses for one request */
bool multiEnd:1; /* both received */
};
@@ -1052,5 +1072,6 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
void cifs_oplock_break(struct work_struct *work);
extern const struct slow_work_ops cifs_oplock_break_ops;
+extern struct workqueue_struct *cifsiod_wq;
#endif /* _CIFS_GLOB_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 503e73d8bdb7..96192c1e380a 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -77,7 +77,7 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
struct smb_hdr * /* out */ ,
int * /* bytes returned */ , const int long_op);
extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
- struct smb_hdr *in_buf, int flags);
+ char *in_buf, int flags);
extern int cifs_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
@@ -91,9 +91,8 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
extern void cifs_add_credits(struct TCP_Server_Info *server,
const unsigned int add);
extern void cifs_set_credits(struct TCP_Server_Info *server, const int val);
-extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
-extern bool is_valid_oplock_break(struct smb_hdr *smb,
- struct TCP_Server_Info *);
+extern int checkSMB(char *buf, unsigned int length);
+extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
extern bool backup_cred(struct cifs_sb_info *);
extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
@@ -107,7 +106,7 @@ extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
const unsigned short int port);
-extern int map_smb_to_linux_error(struct smb_hdr *smb, bool logErr);
+extern int map_smb_to_linux_error(char *buf, bool logErr);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifs_tcon *, int /* length of
fixed section (word count) in two byte units */);
@@ -116,7 +115,7 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
void **request_buf);
extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp);
-extern __u16 GetNextMid(struct TCP_Server_Info *server);
+extern __u64 GetNextMid(struct TCP_Server_Info *server);
extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
extern u64 cifs_UnixTimeToNT(struct timespec);
extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
@@ -484,18 +483,25 @@ int cifs_async_readv(struct cifs_readdata *rdata);
/* asynchronous write support */
struct cifs_writedata {
struct kref refcount;
+ struct list_head list;
+ struct completion done;
enum writeback_sync_modes sync_mode;
struct work_struct work;
struct cifsFileInfo *cfile;
__u64 offset;
+ pid_t pid;
unsigned int bytes;
int result;
+ void (*marshal_iov) (struct kvec *iov,
+ struct cifs_writedata *wdata);
unsigned int nr_pages;
struct page *pages[1];
};
int cifs_async_writev(struct cifs_writedata *wdata);
-struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages);
+void cifs_writev_complete(struct work_struct *work);
+struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
+ work_func_t complete);
void cifs_writedata_release(struct kref *refcount);
#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 70aac35c398f..8fecc99be344 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -696,7 +696,7 @@ CIFSSMBTDis(const int xid, struct cifs_tcon *tcon)
if (rc)
return rc;
- rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0);
+ rc = SendReceiveNoRsp(xid, tcon->ses, (char *)smb_buffer, 0);
if (rc)
cFYI(1, "Tree disconnect failed %d", rc);
@@ -792,7 +792,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
pSMB->hdr.Uid = ses->Suid;
pSMB->AndXCommand = 0xFF;
- rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0);
+ rc = SendReceiveNoRsp(xid, ses, (char *) pSMB, 0);
session_already_dead:
mutex_unlock(&ses->session_mutex);
@@ -1414,8 +1414,7 @@ cifs_readdata_free(struct cifs_readdata *rdata)
static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
- READ_RSP *rsp = (READ_RSP *)server->smallbuf;
- unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length);
+ unsigned int rfclen = get_rfc1002_length(server->smallbuf);
int remaining = rfclen + 4 - server->total_read;
struct cifs_readdata *rdata = mid->callback_data;
@@ -1424,7 +1423,7 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
length = cifs_read_from_socket(server, server->bigbuf,
min_t(unsigned int, remaining,
- CIFSMaxBufSize + MAX_CIFS_HDR_SIZE));
+ CIFSMaxBufSize + max_header_size()));
if (length < 0)
return length;
server->total_read += length;
@@ -1435,19 +1434,40 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
return 0;
}
+static inline size_t
+read_rsp_size(void)
+{
+ return sizeof(READ_RSP);
+}
+
+static inline unsigned int
+read_data_offset(char *buf)
+{
+ READ_RSP *rsp = (READ_RSP *)buf;
+ return le16_to_cpu(rsp->DataOffset);
+}
+
+static inline unsigned int
+read_data_length(char *buf)
+{
+ READ_RSP *rsp = (READ_RSP *)buf;
+ return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
+ le16_to_cpu(rsp->DataLength);
+}
+
static int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
int length, len;
unsigned int data_offset, remaining, data_len;
struct cifs_readdata *rdata = mid->callback_data;
- READ_RSP *rsp = (READ_RSP *)server->smallbuf;
- unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length) + 4;
+ char *buf = server->smallbuf;
+ unsigned int buflen = get_rfc1002_length(buf) + 4;
u64 eof;
pgoff_t eof_index;
struct page *page, *tpage;
- cFYI(1, "%s: mid=%u offset=%llu bytes=%u", __func__,
+ cFYI(1, "%s: mid=%llu offset=%llu bytes=%u", __func__,
mid->mid, rdata->offset, rdata->bytes);
/*
@@ -1455,10 +1475,9 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
* can if there's not enough data. At this point, we've read down to
* the Mid.
*/
- len = min_t(unsigned int, rfclen, sizeof(*rsp)) -
- sizeof(struct smb_hdr) + 1;
+ len = min_t(unsigned int, buflen, read_rsp_size()) - header_size() + 1;
- rdata->iov[0].iov_base = server->smallbuf + sizeof(struct smb_hdr) - 1;
+ rdata->iov[0].iov_base = buf + header_size() - 1;
rdata->iov[0].iov_len = len;
length = cifs_readv_from_socket(server, rdata->iov, 1, len);
@@ -1467,7 +1486,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
server->total_read += length;
/* Was the SMB read successful? */
- rdata->result = map_smb_to_linux_error(&rsp->hdr, false);
+ rdata->result = map_smb_to_linux_error(buf, false);
if (rdata->result != 0) {
cFYI(1, "%s: server returned error %d", __func__,
rdata->result);
@@ -1475,14 +1494,14 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
/* Is there enough to get to the rest of the READ_RSP header? */
- if (server->total_read < sizeof(READ_RSP)) {
+ if (server->total_read < read_rsp_size()) {
cFYI(1, "%s: server returned short header. got=%u expected=%zu",
- __func__, server->total_read, sizeof(READ_RSP));
+ __func__, server->total_read, read_rsp_size());
rdata->result = -EIO;
return cifs_readv_discard(server, mid);
}
- data_offset = le16_to_cpu(rsp->DataOffset) + 4;
+ data_offset = read_data_offset(buf) + 4;
if (data_offset < server->total_read) {
/*
* win2k8 sometimes sends an offset of 0 when the read
@@ -1506,7 +1525,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
len = data_offset - server->total_read;
if (len > 0) {
/* read any junk before data into the rest of smallbuf */
- rdata->iov[0].iov_base = server->smallbuf + server->total_read;
+ rdata->iov[0].iov_base = buf + server->total_read;
rdata->iov[0].iov_len = len;
length = cifs_readv_from_socket(server, rdata->iov, 1, len);
if (length < 0)
@@ -1515,15 +1534,14 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
/* set up first iov for signature check */
- rdata->iov[0].iov_base = server->smallbuf;
+ rdata->iov[0].iov_base = buf;
rdata->iov[0].iov_len = server->total_read;
cFYI(1, "0: iov_base=%p iov_len=%zu",
rdata->iov[0].iov_base, rdata->iov[0].iov_len);
/* how much data is in the response? */
- data_len = le16_to_cpu(rsp->DataLengthHigh) << 16;
- data_len += le16_to_cpu(rsp->DataLength);
- if (data_offset + data_len > rfclen) {
+ data_len = read_data_length(buf);
+ if (data_offset + data_len > buflen) {
/* data_len is corrupt -- discard frame */
rdata->result = -EIO;
return cifs_readv_discard(server, mid);
@@ -1602,11 +1620,11 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
rdata->bytes = length;
- cFYI(1, "total_read=%u rfclen=%u remaining=%u", server->total_read,
- rfclen, remaining);
+ cFYI(1, "total_read=%u buflen=%u remaining=%u", server->total_read,
+ buflen, remaining);
/* discard anything left over */
- if (server->total_read < rfclen)
+ if (server->total_read < buflen)
return cifs_readv_discard(server, mid);
dequeue_mid(mid, false);
@@ -1647,10 +1665,10 @@ cifs_readv_callback(struct mid_q_entry *mid)
struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
- cFYI(1, "%s: mid=%u state=%d result=%d bytes=%u", __func__,
- mid->mid, mid->midState, rdata->result, rdata->bytes);
+ cFYI(1, "%s: mid=%llu state=%d result=%d bytes=%u", __func__,
+ mid->mid, mid->mid_state, rdata->result, rdata->bytes);
- switch (mid->midState) {
+ switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
/* result already set, check signature */
if (server->sec_mode &
@@ -1671,7 +1689,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
rdata->result = -EIO;
}
- queue_work(system_nrt_wq, &rdata->work);
+ queue_work(cifsiod_wq, &rdata->work);
DeleteMidQEntry(mid);
cifs_add_credits(server, 1);
}
@@ -2017,7 +2035,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
kref_put(&wdata->refcount, cifs_writedata_release);
}
-static void
+void
cifs_writev_complete(struct work_struct *work)
{
struct cifs_writedata *wdata = container_of(work,
@@ -2026,7 +2044,9 @@ cifs_writev_complete(struct work_struct *work)
int i = 0;
if (wdata->result == 0) {
+ spin_lock(&inode->i_lock);
cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
+ spin_unlock(&inode->i_lock);
cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
wdata->bytes);
} else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
@@ -2047,7 +2067,7 @@ cifs_writev_complete(struct work_struct *work)
}
struct cifs_writedata *
-cifs_writedata_alloc(unsigned int nr_pages)
+cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
{
struct cifs_writedata *wdata;
@@ -2061,14 +2081,16 @@ cifs_writedata_alloc(unsigned int nr_pages)
wdata = kzalloc(sizeof(*wdata) +
sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
if (wdata != NULL) {
- INIT_WORK(&wdata->work, cifs_writev_complete);
kref_init(&wdata->refcount);
+ INIT_LIST_HEAD(&wdata->list);
+ init_completion(&wdata->done);
+ INIT_WORK(&wdata->work, complete);
}
return wdata;
}
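With this hunk, cifs_writedata_alloc() takes the completion routine as a parameter instead of hard-wiring cifs_writev_complete, so other write paths can hand in their own work handler. A small userspace analogue of that callback-injection pattern, with hypothetical names and no kernel types:

#include <stdlib.h>
#include <stdio.h>

/* Illustrative only: choose the completion callback at allocation time,
 * the same shape as passing a work_func_t into cifs_writedata_alloc(). */
struct writedata {
        void (*complete)(struct writedata *wd);
        unsigned int nr_pages;
};

static struct writedata *writedata_alloc(unsigned int nr_pages,
                                         void (*complete)(struct writedata *))
{
        struct writedata *wd = calloc(1, sizeof(*wd));

        if (wd) {
                wd->nr_pages = nr_pages;
                wd->complete = complete;        /* caller-chosen completion */
        }
        return wd;
}

static void demo_complete(struct writedata *wd)
{
        printf("write of %u pages finished\n", wd->nr_pages);
}

int main(void)
{
        struct writedata *wd = writedata_alloc(4, demo_complete);

        if (wd) {
                wd->complete(wd);
                free(wd);
        }
        return 0;
}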
/*
- * Check the midState and signature on received buffer (if any), and queue the
+ * Check the mid_state and signature on received buffer (if any), and queue the
* workqueue completion task.
*/
static void
@@ -2079,7 +2101,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
unsigned int written;
WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
- switch (mid->midState) {
+ switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
if (wdata->result != 0)
@@ -2111,7 +2133,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
break;
}
- queue_work(system_nrt_wq, &wdata->work);
+ queue_work(cifsiod_wq, &wdata->work);
DeleteMidQEntry(mid);
cifs_add_credits(tcon->ses->server, 1);
}
@@ -2124,7 +2146,6 @@ cifs_async_writev(struct cifs_writedata *wdata)
WRITE_REQ *smb = NULL;
int wct;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
- struct inode *inode = wdata->cfile->dentry->d_inode;
struct kvec *iov = NULL;
if (tcon->ses->capabilities & CAP_LARGE_FILES) {
@@ -2148,8 +2169,8 @@ cifs_async_writev(struct cifs_writedata *wdata)
goto async_writev_out;
}
- smb->hdr.Pid = cpu_to_le16((__u16)wdata->cfile->pid);
- smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->cfile->pid >> 16));
+ smb->hdr.Pid = cpu_to_le16((__u16)wdata->pid);
+ smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16));
smb->AndXCommand = 0xFF; /* none */
smb->Fid = wdata->cfile->netfid;
@@ -2167,15 +2188,13 @@ cifs_async_writev(struct cifs_writedata *wdata)
iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1;
iov[0].iov_base = smb;
- /* marshal up the pages into iov array */
- wdata->bytes = 0;
- for (i = 0; i < wdata->nr_pages; i++) {
- iov[i + 1].iov_len = min(inode->i_size -
- page_offset(wdata->pages[i]),
- (loff_t)PAGE_CACHE_SIZE);
- iov[i + 1].iov_base = kmap(wdata->pages[i]);
- wdata->bytes += iov[i + 1].iov_len;
- }
+ /*
+ * This function should marshal up the page array into the kvec
+ * array, reserving [0] for the header. It should kmap the pages
+ * and set the iov_len properly for each one. It may also set
+ * wdata->bytes too.
+ */
+ wdata->marshal_iov(iov, wdata);
cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
@@ -2420,8 +2439,7 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
(struct smb_hdr *) pSMB, &bytes_returned);
cifs_small_buf_release(pSMB);
} else {
- rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB,
- timeout);
+ rc = SendReceiveNoRsp(xid, tcon->ses, (char *)pSMB, timeout);
/* SMB buffer freed by function above */
}
cifs_stats_inc(&tcon->num_locks);
@@ -2588,7 +2606,7 @@ CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id)
pSMB->FileID = (__u16) smb_file_id;
pSMB->LastWriteTime = 0xFFFFFFFF;
pSMB->ByteCount = 0;
- rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
cifs_stats_inc(&tcon->num_closes);
if (rc) {
if (rc != -EINTR) {
@@ -2617,7 +2635,7 @@ CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id)
pSMB->FileID = (__u16) smb_file_id;
pSMB->ByteCount = 0;
- rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
cifs_stats_inc(&tcon->num_flushes);
if (rc)
cERROR(1, "Send error in Flush = %d", rc);
@@ -4625,7 +4643,7 @@ CIFSFindClose(const int xid, struct cifs_tcon *tcon,
pSMB->FileID = searchHandle;
pSMB->ByteCount = 0;
- rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
if (rc)
cERROR(1, "Send error in FindClose = %d", rc);
@@ -5646,7 +5664,7 @@ CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size,
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
- rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
if (rc) {
cFYI(1, "Send error in SetFileInfo (SetFileSize) = %d", rc);
}
@@ -5715,7 +5733,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
- rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
if (rc)
cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
@@ -5774,7 +5792,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
*data_offset = delete_file ? 1 : 0;
- rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
if (rc)
cFYI(1, "Send error in SetFileDisposition = %d", rc);
@@ -6006,7 +6024,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
cifs_fill_unix_set_info(data_offset, args);
- rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
if (rc)
cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5560e1d5e54b..302a15c505a9 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -40,6 +40,8 @@
#include <linux/module.h>
#include <keys/user-type.h>
#include <net/ipv6.h>
+#include <linux/parser.h>
+
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
@@ -63,6 +65,193 @@ extern mempool_t *cifs_req_poolp;
#define TLINK_ERROR_EXPIRE (1 * HZ)
#define TLINK_IDLE_EXPIRE (600 * HZ)
+enum {
+
+ /* Mount options that take no arguments */
+ Opt_user_xattr, Opt_nouser_xattr,
+ Opt_forceuid, Opt_noforceuid,
+ Opt_noblocksend, Opt_noautotune,
+ Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
+ Opt_mapchars, Opt_nomapchars, Opt_sfu,
+ Opt_nosfu, Opt_nodfs, Opt_posixpaths,
+ Opt_noposixpaths, Opt_nounix,
+ Opt_nocase,
+ Opt_brl, Opt_nobrl,
+ Opt_forcemandatorylock, Opt_setuids,
+ Opt_nosetuids, Opt_dynperm, Opt_nodynperm,
+ Opt_nohard, Opt_nosoft,
+ Opt_nointr, Opt_intr,
+ Opt_nostrictsync, Opt_strictsync,
+ Opt_serverino, Opt_noserverino,
+ Opt_rwpidforward, Opt_cifsacl, Opt_nocifsacl,
+ Opt_acl, Opt_noacl, Opt_locallease,
+ Opt_sign, Opt_seal, Opt_direct,
+ Opt_strictcache, Opt_noac,
+ Opt_fsc, Opt_mfsymlinks,
+ Opt_multiuser, Opt_sloppy,
+
+ /* Mount options which take numeric value */
+ Opt_backupuid, Opt_backupgid, Opt_uid,
+ Opt_cruid, Opt_gid, Opt_file_mode,
+ Opt_dirmode, Opt_port,
+ Opt_rsize, Opt_wsize, Opt_actimeo,
+
+ /* Mount options which take string value */
+ Opt_user, Opt_pass, Opt_ip,
+ Opt_unc, Opt_domain,
+ Opt_srcaddr, Opt_prefixpath,
+ Opt_iocharset, Opt_sockopt,
+ Opt_netbiosname, Opt_servern,
+ Opt_ver, Opt_sec,
+
+ /* Mount options to be ignored */
+ Opt_ignore,
+
+ /* Options which could be blank */
+ Opt_blank_pass,
+
+ Opt_err
+};
+
+static const match_table_t cifs_mount_option_tokens = {
+
+ { Opt_user_xattr, "user_xattr" },
+ { Opt_nouser_xattr, "nouser_xattr" },
+ { Opt_forceuid, "forceuid" },
+ { Opt_noforceuid, "noforceuid" },
+ { Opt_noblocksend, "noblocksend" },
+ { Opt_noautotune, "noautotune" },
+ { Opt_hard, "hard" },
+ { Opt_soft, "soft" },
+ { Opt_perm, "perm" },
+ { Opt_noperm, "noperm" },
+ { Opt_mapchars, "mapchars" },
+ { Opt_nomapchars, "nomapchars" },
+ { Opt_sfu, "sfu" },
+ { Opt_nosfu, "nosfu" },
+ { Opt_nodfs, "nodfs" },
+ { Opt_posixpaths, "posixpaths" },
+ { Opt_noposixpaths, "noposixpaths" },
+ { Opt_nounix, "nounix" },
+ { Opt_nounix, "nolinux" },
+ { Opt_nocase, "nocase" },
+ { Opt_nocase, "ignorecase" },
+ { Opt_brl, "brl" },
+ { Opt_nobrl, "nobrl" },
+ { Opt_nobrl, "nolock" },
+ { Opt_forcemandatorylock, "forcemandatorylock" },
+ { Opt_forcemandatorylock, "forcemand" },
+ { Opt_setuids, "setuids" },
+ { Opt_nosetuids, "nosetuids" },
+ { Opt_dynperm, "dynperm" },
+ { Opt_nodynperm, "nodynperm" },
+ { Opt_nohard, "nohard" },
+ { Opt_nosoft, "nosoft" },
+ { Opt_nointr, "nointr" },
+ { Opt_intr, "intr" },
+ { Opt_nostrictsync, "nostrictsync" },
+ { Opt_strictsync, "strictsync" },
+ { Opt_serverino, "serverino" },
+ { Opt_noserverino, "noserverino" },
+ { Opt_rwpidforward, "rwpidforward" },
+ { Opt_cifsacl, "cifsacl" },
+ { Opt_nocifsacl, "nocifsacl" },
+ { Opt_acl, "acl" },
+ { Opt_noacl, "noacl" },
+ { Opt_locallease, "locallease" },
+ { Opt_sign, "sign" },
+ { Opt_seal, "seal" },
+ { Opt_direct, "direct" },
+ { Opt_direct, "forceddirectio" },
+ { Opt_strictcache, "strictcache" },
+ { Opt_noac, "noac" },
+ { Opt_fsc, "fsc" },
+ { Opt_mfsymlinks, "mfsymlinks" },
+ { Opt_multiuser, "multiuser" },
+ { Opt_sloppy, "sloppy" },
+
+ { Opt_backupuid, "backupuid=%s" },
+ { Opt_backupgid, "backupgid=%s" },
+ { Opt_uid, "uid=%s" },
+ { Opt_cruid, "cruid=%s" },
+ { Opt_gid, "gid=%s" },
+ { Opt_file_mode, "file_mode=%s" },
+ { Opt_dirmode, "dirmode=%s" },
+ { Opt_dirmode, "dir_mode=%s" },
+ { Opt_port, "port=%s" },
+ { Opt_rsize, "rsize=%s" },
+ { Opt_wsize, "wsize=%s" },
+ { Opt_actimeo, "actimeo=%s" },
+
+ { Opt_user, "user=%s" },
+ { Opt_user, "username=%s" },
+ { Opt_blank_pass, "pass=" },
+ { Opt_pass, "pass=%s" },
+ { Opt_pass, "password=%s" },
+ { Opt_ip, "ip=%s" },
+ { Opt_ip, "addr=%s" },
+ { Opt_unc, "unc=%s" },
+ { Opt_unc, "target=%s" },
+ { Opt_unc, "path=%s" },
+ { Opt_domain, "dom=%s" },
+ { Opt_domain, "domain=%s" },
+ { Opt_domain, "workgroup=%s" },
+ { Opt_srcaddr, "srcaddr=%s" },
+ { Opt_prefixpath, "prefixpath=%s" },
+ { Opt_iocharset, "iocharset=%s" },
+ { Opt_sockopt, "sockopt=%s" },
+ { Opt_netbiosname, "netbiosname=%s" },
+ { Opt_servern, "servern=%s" },
+ { Opt_ver, "ver=%s" },
+ { Opt_ver, "vers=%s" },
+ { Opt_ver, "version=%s" },
+ { Opt_sec, "sec=%s" },
+
+ { Opt_ignore, "cred" },
+ { Opt_ignore, "credentials" },
+ { Opt_ignore, "guest" },
+ { Opt_ignore, "rw" },
+ { Opt_ignore, "ro" },
+ { Opt_ignore, "suid" },
+ { Opt_ignore, "nosuid" },
+ { Opt_ignore, "exec" },
+ { Opt_ignore, "noexec" },
+ { Opt_ignore, "nodev" },
+ { Opt_ignore, "noauto" },
+ { Opt_ignore, "dev" },
+ { Opt_ignore, "mand" },
+ { Opt_ignore, "nomand" },
+ { Opt_ignore, "_netdev" },
+
+ { Opt_err, NULL }
+};
+
+enum {
+ Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p,
+ Opt_sec_ntlmsspi, Opt_sec_ntlmssp,
+ Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2i,
+ Opt_sec_nontlm, Opt_sec_lanman,
+ Opt_sec_none,
+
+ Opt_sec_err
+};
+
+static const match_table_t cifs_secflavor_tokens = {
+ { Opt_sec_krb5, "krb5" },
+ { Opt_sec_krb5i, "krb5i" },
+ { Opt_sec_krb5p, "krb5p" },
+ { Opt_sec_ntlmsspi, "ntlmsspi" },
+ { Opt_sec_ntlmssp, "ntlmssp" },
+ { Opt_ntlm, "ntlm" },
+ { Opt_sec_ntlmi, "ntlmi" },
+ { Opt_sec_ntlmv2i, "ntlmv2i" },
+ { Opt_sec_nontlm, "nontlm" },
+ { Opt_sec_lanman, "lanman" },
+ { Opt_sec_none, "none" },
+
+ { Opt_sec_err, NULL }
+};
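These tables are consumed with match_token() from <linux/parser.h>, which returns the Opt_* id and captures any "%s" argument into a substring_t. As a rough userspace analogue of that keyword/value split (simplified, with hypothetical option ids and no pattern language):

#include <string.h>
#include <stdio.h>

/* Illustrative only: map "key" or "key=value" strings to numeric ids the way
 * the cifs_mount_option_tokens table is used; the real code relies on
 * match_token() and substring_t instead. */
struct opt { int id; const char *key; int has_arg; };

static const struct opt opts[] = {
        { 1, "hard", 0 }, { 2, "soft", 0 }, { 3, "uid", 1 }, { 0, NULL, 0 },
};

static int lookup_opt(const char *token, const char **value)
{
        size_t klen = strcspn(token, "=");
        const struct opt *o;

        *value = token[klen] == '=' ? token + klen + 1 : NULL;
        for (o = opts; o->key; o++) {
                if (strlen(o->key) == klen && !strncmp(o->key, token, klen) &&
                    (o->has_arg == (*value != NULL)))
                        return o->id;
        }
        return -1;
}

int main(void)
{
        const char *val;

        printf("%d\n", lookup_opt("uid=1000", &val));   /* 3, val = "1000" */
        printf("%d\n", lookup_opt("hard", &val));       /* 1 */
        return 0;
}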
+
static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
@@ -143,8 +332,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
spin_lock(&GlobalMid_Lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- if (mid_entry->midState == MID_REQUEST_SUBMITTED)
- mid_entry->midState = MID_RETRY_NEEDED;
+ if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
+ mid_entry->mid_state = MID_RETRY_NEEDED;
list_move(&mid_entry->qhead, &retry_list);
}
spin_unlock(&GlobalMid_Lock);
@@ -183,8 +372,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
-EINVAL = invalid transact2
*/
-static int check2ndT2(struct smb_hdr *pSMB)
+static int check2ndT2(char *buf)
{
+ struct smb_hdr *pSMB = (struct smb_hdr *)buf;
struct smb_t2_rsp *pSMBt;
int remaining;
__u16 total_data_size, data_in_this_rsp;
@@ -224,10 +414,10 @@ static int check2ndT2(struct smb_hdr *pSMB)
return remaining;
}
-static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
+static int coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
{
- struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)psecond;
- struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)pTargetSMB;
+ struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)second_buf;
+ struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)target_hdr;
char *data_area_of_tgt;
char *data_area_of_src;
int remaining;
@@ -280,23 +470,23 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);
/* fix up the BCC */
- byte_count = get_bcc(pTargetSMB);
+ byte_count = get_bcc(target_hdr);
byte_count += total_in_src;
/* is the result too big for the field? */
if (byte_count > USHRT_MAX) {
cFYI(1, "coalesced BCC too large (%u)", byte_count);
return -EPROTO;
}
- put_bcc(byte_count, pTargetSMB);
+ put_bcc(byte_count, target_hdr);
- byte_count = be32_to_cpu(pTargetSMB->smb_buf_length);
+ byte_count = be32_to_cpu(target_hdr->smb_buf_length);
byte_count += total_in_src;
/* don't allow buffer to overflow */
if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count);
return -ENOBUFS;
}
- pTargetSMB->smb_buf_length = cpu_to_be32(byte_count);
+ target_hdr->smb_buf_length = cpu_to_be32(byte_count);
/* copy second buffer into end of first buffer */
memcpy(data_area_of_tgt, data_area_of_src, total_in_src);
@@ -334,7 +524,7 @@ cifs_echo_request(struct work_struct *work)
server->hostname);
requeue_echo:
- queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL);
+ queue_delayed_work(cifsiod_wq, &server->echo, SMB_ECHO_INTERVAL);
}
static bool
@@ -350,7 +540,7 @@ allocate_buffers(struct TCP_Server_Info *server)
}
} else if (server->large_buf) {
/* we are reusing a dirty large buf, clear its start */
- memset(server->bigbuf, 0, sizeof(struct smb_hdr));
+ memset(server->bigbuf, 0, header_size());
}
if (!server->smallbuf) {
@@ -364,7 +554,7 @@ allocate_buffers(struct TCP_Server_Info *server)
/* beginning of smb buffer is cleared in our buf_get */
} else {
/* if existing small buf clear beginning */
- memset(server->smallbuf, 0, sizeof(struct smb_hdr));
+ memset(server->smallbuf, 0, header_size());
}
return true;
@@ -566,15 +756,16 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
}
static struct mid_q_entry *
-find_mid(struct TCP_Server_Info *server, struct smb_hdr *buf)
+find_mid(struct TCP_Server_Info *server, char *buffer)
{
+ struct smb_hdr *buf = (struct smb_hdr *)buffer;
struct mid_q_entry *mid;
spin_lock(&GlobalMid_Lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
if (mid->mid == buf->Mid &&
- mid->midState == MID_REQUEST_SUBMITTED &&
- mid->command == buf->Command) {
+ mid->mid_state == MID_REQUEST_SUBMITTED &&
+ le16_to_cpu(mid->command) == buf->Command) {
spin_unlock(&GlobalMid_Lock);
return mid;
}
@@ -591,16 +782,16 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
#endif
spin_lock(&GlobalMid_Lock);
if (!malformed)
- mid->midState = MID_RESPONSE_RECEIVED;
+ mid->mid_state = MID_RESPONSE_RECEIVED;
else
- mid->midState = MID_RESPONSE_MALFORMED;
+ mid->mid_state = MID_RESPONSE_MALFORMED;
list_del_init(&mid->qhead);
spin_unlock(&GlobalMid_Lock);
}
static void
handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
- struct smb_hdr *buf, int malformed)
+ char *buf, int malformed)
{
if (malformed == 0 && check2ndT2(buf) > 0) {
mid->multiRsp = true;
@@ -620,13 +811,13 @@ handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
} else {
/* Have first buffer */
mid->resp_buf = buf;
- mid->largeBuf = true;
+ mid->large_buf = true;
server->bigbuf = NULL;
}
return;
}
mid->resp_buf = buf;
- mid->largeBuf = server->large_buf;
+ mid->large_buf = server->large_buf;
/* Was previous buf put in mpx struct for multi-rsp? */
if (!mid->multiRsp) {
/* smb buffer will be freed by user thread */
@@ -682,8 +873,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
spin_lock(&GlobalMid_Lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cFYI(1, "Clearing mid 0x%x", mid_entry->mid);
- mid_entry->midState = MID_SHUTDOWN;
+ cFYI(1, "Clearing mid 0x%llx", mid_entry->mid);
+ mid_entry->mid_state = MID_SHUTDOWN;
list_move(&mid_entry->qhead, &dispose_list);
}
spin_unlock(&GlobalMid_Lock);
@@ -691,7 +882,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
/* now walk dispose list and issue callbacks */
list_for_each_safe(tmp, tmp2, &dispose_list) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cFYI(1, "Callback mid 0x%x", mid_entry->mid);
+ cFYI(1, "Callback mid 0x%llx", mid_entry->mid);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
}
@@ -731,11 +922,10 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
int length;
char *buf = server->smallbuf;
- struct smb_hdr *smb_buffer = (struct smb_hdr *)buf;
- unsigned int pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
+ unsigned int pdu_length = get_rfc1002_length(buf);
/* make sure this will fit in a large buffer */
- if (pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+ if (pdu_length > CIFSMaxBufSize + max_header_size() - 4) {
cERROR(1, "SMB response too long (%u bytes)",
pdu_length);
cifs_reconnect(server);
@@ -746,20 +936,18 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
/* switch to large buffer if too big for a small one */
if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
server->large_buf = true;
- memcpy(server->bigbuf, server->smallbuf, server->total_read);
+ memcpy(server->bigbuf, buf, server->total_read);
buf = server->bigbuf;
- smb_buffer = (struct smb_hdr *)buf;
}
/* now read the rest */
- length = cifs_read_from_socket(server,
- buf + sizeof(struct smb_hdr) - 1,
- pdu_length - sizeof(struct smb_hdr) + 1 + 4);
+ length = cifs_read_from_socket(server, buf + header_size() - 1,
+ pdu_length - header_size() + 1 + 4);
if (length < 0)
return length;
server->total_read += length;
- dump_smb(smb_buffer, server->total_read);
+ dump_smb(buf, server->total_read);
/*
* We know that we received enough to get to the MID as we
@@ -770,7 +958,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
* 48 bytes is enough to display the header and a little bit
* into the payload for debugging purposes.
*/
- length = checkSMB(smb_buffer, smb_buffer->Mid, server->total_read);
+ length = checkSMB(buf, server->total_read);
if (length != 0)
cifs_dump_mem("Bad SMB: ", buf,
min_t(unsigned int, server->total_read, 48));
@@ -778,7 +966,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
if (!mid)
return length;
- handle_mid(mid, server, smb_buffer, length);
+ handle_mid(mid, server, buf, length);
return 0;
}
@@ -789,7 +977,6 @@ cifs_demultiplex_thread(void *p)
struct TCP_Server_Info *server = p;
unsigned int pdu_length;
char *buf = NULL;
- struct smb_hdr *smb_buffer = NULL;
struct task_struct *task_to_wake = NULL;
struct mid_q_entry *mid_entry;
@@ -810,7 +997,6 @@ cifs_demultiplex_thread(void *p)
continue;
server->large_buf = false;
- smb_buffer = (struct smb_hdr *)server->smallbuf;
buf = server->smallbuf;
pdu_length = 4; /* enough to get RFC1001 header */
@@ -823,14 +1009,14 @@ cifs_demultiplex_thread(void *p)
* The right amount was read from socket - 4 bytes,
* so we can now interpret the length field.
*/
- pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
+ pdu_length = get_rfc1002_length(buf);
cFYI(1, "RFC1002 header 0x%x", pdu_length);
if (!is_smb_response(server, buf[0]))
continue;
/* make sure we have enough to get to the MID */
- if (pdu_length < sizeof(struct smb_hdr) - 1 - 4) {
+ if (pdu_length < header_size() - 1 - 4) {
cERROR(1, "SMB response too short (%u bytes)",
pdu_length);
cifs_reconnect(server);
@@ -840,12 +1026,12 @@ cifs_demultiplex_thread(void *p)
/* read down to the MID */
length = cifs_read_from_socket(server, buf + 4,
- sizeof(struct smb_hdr) - 1 - 4);
+ header_size() - 1 - 4);
if (length < 0)
continue;
server->total_read += length;
- mid_entry = find_mid(server, smb_buffer);
+ mid_entry = find_mid(server, buf);
if (!mid_entry || !mid_entry->receive)
length = standard_receive3(server, mid_entry);
@@ -855,22 +1041,19 @@ cifs_demultiplex_thread(void *p)
if (length < 0)
continue;
- if (server->large_buf) {
+ if (server->large_buf)
buf = server->bigbuf;
- smb_buffer = (struct smb_hdr *)buf;
- }
server->lstrp = jiffies;
if (mid_entry != NULL) {
if (!mid_entry->multiRsp || mid_entry->multiEnd)
mid_entry->callback(mid_entry);
- } else if (!is_valid_oplock_break(smb_buffer, server)) {
+ } else if (!is_valid_oplock_break(buf, server)) {
cERROR(1, "No task to wake, unknown frame received! "
"NumMids %d", atomic_read(&midCount));
- cifs_dump_mem("Received Data is: ", buf,
- sizeof(struct smb_hdr));
+ cifs_dump_mem("Received Data is: ", buf, header_size());
#ifdef CONFIG_CIFS_DEBUG2
- cifs_dump_detail(smb_buffer);
+ cifs_dump_detail(buf);
cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
@@ -926,23 +1109,95 @@ extract_hostname(const char *unc)
return dst;
}
+static int get_option_ul(substring_t args[], unsigned long *option)
+{
+ int rc;
+ char *string;
+
+ string = match_strdup(args);
+ if (string == NULL)
+ return -ENOMEM;
+ rc = kstrtoul(string, 10, option);
+ kfree(string);
+
+ return rc;
+}
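get_option_ul() duplicates the matched substring and feeds it to kstrtoul(), which fails on empty input, overflow, or trailing characters. A userspace analogue of those checks built on strtoul, again purely for illustration:

#include <stdlib.h>
#include <errno.h>

/* Illustrative only: parse a base-10 unsigned long and reject empty input,
 * overflow, and trailing garbage -- the behaviour kstrtoul() gives the
 * kernel helper above. */
static int parse_ul(const char *s, unsigned long *out)
{
        char *end;

        errno = 0;
        *out = strtoul(s, &end, 10);
        if (end == s || *end != '\0')
                return -EINVAL;
        if (errno == ERANGE)
                return -ERANGE;
        return 0;
}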
+
+static int cifs_parse_security_flavors(char *value,
+ struct smb_vol *vol)
+{
+
+ substring_t args[MAX_OPT_ARGS];
+
+ switch (match_token(value, cifs_secflavor_tokens, args)) {
+ case Opt_sec_krb5:
+ vol->secFlg |= CIFSSEC_MAY_KRB5;
+ break;
+ case Opt_sec_krb5i:
+ vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN;
+ break;
+ case Opt_sec_krb5p:
+ /* vol->secFlg |= CIFSSEC_MUST_SEAL | CIFSSEC_MAY_KRB5; */
+ cERROR(1, "Krb5 cifs privacy not supported");
+ break;
+ case Opt_sec_ntlmssp:
+ vol->secFlg |= CIFSSEC_MAY_NTLMSSP;
+ break;
+ case Opt_sec_ntlmsspi:
+ vol->secFlg |= CIFSSEC_MAY_NTLMSSP | CIFSSEC_MUST_SIGN;
+ break;
+ case Opt_ntlm:
+ /* ntlm is default so can be turned off too */
+ vol->secFlg |= CIFSSEC_MAY_NTLM;
+ break;
+ case Opt_sec_ntlmi:
+ vol->secFlg |= CIFSSEC_MAY_NTLM | CIFSSEC_MUST_SIGN;
+ break;
+ case Opt_sec_nontlm:
+ vol->secFlg |= CIFSSEC_MAY_NTLMV2;
+ break;
+ case Opt_sec_ntlmv2i:
+ vol->secFlg |= CIFSSEC_MAY_NTLMV2 | CIFSSEC_MUST_SIGN;
+ break;
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ case Opt_sec_lanman:
+ vol->secFlg |= CIFSSEC_MAY_LANMAN;
+ break;
+#endif
+ case Opt_sec_none:
+ vol->nullauth = 1;
+ break;
+ default:
+ cERROR(1, "bad security option: %s", value);
+ return 1;
+ }
+
+ return 0;
+}
+
static int
cifs_parse_mount_options(const char *mountdata, const char *devname,
struct smb_vol *vol)
{
- char *value, *data, *end;
+ char *data, *end;
char *mountdata_copy = NULL, *options;
- int err;
unsigned int temp_len, i, j;
char separator[2];
short int override_uid = -1;
short int override_gid = -1;
bool uid_specified = false;
bool gid_specified = false;
+ bool sloppy = false;
+ char *invalid = NULL;
char *nodename = utsname()->nodename;
+ char *string = NULL;
+ char *tmp_end, *value;
+ char delim;
separator[0] = ',';
separator[1] = 0;
+ delim = separator[0];
/*
* does not have to be perfect mapping since field is
@@ -981,6 +1236,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
options = mountdata_copy;
end = options + strlen(options);
+
if (strncmp(options, "sep=", 4) == 0) {
if (options[4] != 0) {
separator[0] = options[4];
@@ -993,609 +1249,652 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->backupgid_specified = false; /* no backup intent for a group */
while ((data = strsep(&options, separator)) != NULL) {
+ substring_t args[MAX_OPT_ARGS];
+ unsigned long option;
+ int token;
+
if (!*data)
continue;
- if ((value = strchr(data, '=')) != NULL)
- *value++ = '\0';
- /* Have to parse this before we parse for "user" */
- if (strnicmp(data, "user_xattr", 10) == 0) {
+ token = match_token(data, cifs_mount_option_tokens, args);
+
+ switch (token) {
+
+ /* Ignore the following */
+ case Opt_ignore:
+ break;
+
+ /* Boolean values */
+ case Opt_user_xattr:
vol->no_xattr = 0;
- } else if (strnicmp(data, "nouser_xattr", 12) == 0) {
+ break;
+ case Opt_nouser_xattr:
vol->no_xattr = 1;
- } else if (strnicmp(data, "user", 4) == 0) {
- if (!value) {
- printk(KERN_WARNING
- "CIFS: invalid or missing username\n");
- goto cifs_parse_mount_err;
- } else if (!*value) {
- /* null user, ie anonymous, authentication */
- vol->nullauth = 1;
- }
- if (strnlen(value, MAX_USERNAME_SIZE) <
- MAX_USERNAME_SIZE) {
- vol->username = kstrdup(value, GFP_KERNEL);
- if (!vol->username) {
- printk(KERN_WARNING "CIFS: no memory "
- "for username\n");
- goto cifs_parse_mount_err;
- }
- } else {
- printk(KERN_WARNING "CIFS: username too long\n");
- goto cifs_parse_mount_err;
- }
- } else if (strnicmp(data, "pass", 4) == 0) {
- if (!value) {
- vol->password = NULL;
- continue;
- } else if (value[0] == 0) {
- /* check if string begins with double comma
- since that would mean the password really
- does start with a comma, and would not
- indicate an empty string */
- if (value[1] != separator[0]) {
- vol->password = NULL;
- continue;
- }
- }
- temp_len = strlen(value);
- /* removed password length check, NTLM passwords
- can be arbitrarily long */
-
- /* if comma in password, the string will be
- prematurely null terminated. Commas in password are
- specified across the cifs mount interface by a double
- comma ie ,, and a comma used as in other cases ie ','
- as a parameter delimiter/separator is single and due
- to the strsep above is temporarily zeroed. */
-
- /* NB: password legally can have multiple commas and
- the only illegal character in a password is null */
-
- if ((value[temp_len] == 0) &&
- (value + temp_len < end) &&
- (value[temp_len+1] == separator[0])) {
- /* reinsert comma */
- value[temp_len] = separator[0];
- temp_len += 2; /* move after second comma */
- while (value[temp_len] != 0) {
- if (value[temp_len] == separator[0]) {
- if (value[temp_len+1] ==
- separator[0]) {
- /* skip second comma */
- temp_len++;
- } else {
- /* single comma indicating start
- of next parm */
- break;
- }
- }
- temp_len++;
- }
- if (value[temp_len] == 0) {
- options = NULL;
- } else {
- value[temp_len] = 0;
- /* point option to start of next parm */
- options = value + temp_len + 1;
- }
- /* go from value to value + temp_len condensing
- double commas to singles. Note that this ends up
- allocating a few bytes too many, which is ok */
- vol->password = kzalloc(temp_len, GFP_KERNEL);
- if (vol->password == NULL) {
- printk(KERN_WARNING "CIFS: no memory "
- "for password\n");
- goto cifs_parse_mount_err;
- }
- for (i = 0, j = 0; i < temp_len; i++, j++) {
- vol->password[j] = value[i];
- if (value[i] == separator[0]
- && value[i+1] == separator[0]) {
- /* skip second comma */
- i++;
- }
- }
- vol->password[j] = 0;
- } else {
- vol->password = kzalloc(temp_len+1, GFP_KERNEL);
- if (vol->password == NULL) {
- printk(KERN_WARNING "CIFS: no memory "
- "for password\n");
- goto cifs_parse_mount_err;
- }
- strcpy(vol->password, value);
- }
- } else if (!strnicmp(data, "ip", 2) ||
- !strnicmp(data, "addr", 4)) {
- if (!value || !*value) {
- vol->UNCip = NULL;
- } else if (strnlen(value, INET6_ADDRSTRLEN) <
- INET6_ADDRSTRLEN) {
- vol->UNCip = kstrdup(value, GFP_KERNEL);
- if (!vol->UNCip) {
- printk(KERN_WARNING "CIFS: no memory "
- "for UNC IP\n");
- goto cifs_parse_mount_err;
- }
- } else {
- printk(KERN_WARNING "CIFS: ip address "
- "too long\n");
- goto cifs_parse_mount_err;
- }
- } else if (strnicmp(data, "sec", 3) == 0) {
- if (!value || !*value) {
- cERROR(1, "no security value specified");
- continue;
- } else if (strnicmp(value, "krb5i", 5) == 0) {
- vol->secFlg |= CIFSSEC_MAY_KRB5 |
- CIFSSEC_MUST_SIGN;
- } else if (strnicmp(value, "krb5p", 5) == 0) {
- /* vol->secFlg |= CIFSSEC_MUST_SEAL |
- CIFSSEC_MAY_KRB5; */
- cERROR(1, "Krb5 cifs privacy not supported");
- goto cifs_parse_mount_err;
- } else if (strnicmp(value, "krb5", 4) == 0) {
- vol->secFlg |= CIFSSEC_MAY_KRB5;
- } else if (strnicmp(value, "ntlmsspi", 8) == 0) {
- vol->secFlg |= CIFSSEC_MAY_NTLMSSP |
- CIFSSEC_MUST_SIGN;
- } else if (strnicmp(value, "ntlmssp", 7) == 0) {
- vol->secFlg |= CIFSSEC_MAY_NTLMSSP;
- } else if (strnicmp(value, "ntlmv2i", 7) == 0) {
- vol->secFlg |= CIFSSEC_MAY_NTLMV2 |
- CIFSSEC_MUST_SIGN;
- } else if (strnicmp(value, "ntlmv2", 6) == 0) {
- vol->secFlg |= CIFSSEC_MAY_NTLMV2;
- } else if (strnicmp(value, "ntlmi", 5) == 0) {
- vol->secFlg |= CIFSSEC_MAY_NTLM |
- CIFSSEC_MUST_SIGN;
- } else if (strnicmp(value, "ntlm", 4) == 0) {
- /* ntlm is default so can be turned off too */
- vol->secFlg |= CIFSSEC_MAY_NTLM;
- } else if (strnicmp(value, "nontlm", 6) == 0) {
- /* BB is there a better way to do this? */
- vol->secFlg |= CIFSSEC_MAY_NTLMV2;
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
- } else if (strnicmp(value, "lanman", 6) == 0) {
- vol->secFlg |= CIFSSEC_MAY_LANMAN;
-#endif
- } else if (strnicmp(value, "none", 4) == 0) {
- vol->nullauth = 1;
- } else {
- cERROR(1, "bad security option: %s", value);
- goto cifs_parse_mount_err;
- }
- } else if (strnicmp(data, "vers", 3) == 0) {
- if (!value || !*value) {
- cERROR(1, "no protocol version specified"
- " after vers= mount option");
- } else if ((strnicmp(value, "cifs", 4) == 0) ||
- (strnicmp(value, "1", 1) == 0)) {
- /* this is the default */
- continue;
- }
- } else if ((strnicmp(data, "unc", 3) == 0)
- || (strnicmp(data, "target", 6) == 0)
- || (strnicmp(data, "path", 4) == 0)) {
- if (!value || !*value) {
- printk(KERN_WARNING "CIFS: invalid path to "
- "network resource\n");
- goto cifs_parse_mount_err;
- }
- if ((temp_len = strnlen(value, 300)) < 300) {
- vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
- if (vol->UNC == NULL)
- goto cifs_parse_mount_err;
- strcpy(vol->UNC, value);
- if (strncmp(vol->UNC, "//", 2) == 0) {
- vol->UNC[0] = '\\';
- vol->UNC[1] = '\\';
- } else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
- printk(KERN_WARNING
- "CIFS: UNC Path does not begin "
- "with // or \\\\ \n");
- goto cifs_parse_mount_err;
- }
- } else {
- printk(KERN_WARNING "CIFS: UNC name too long\n");
- goto cifs_parse_mount_err;
- }
- } else if ((strnicmp(data, "domain", 3) == 0)
- || (strnicmp(data, "workgroup", 5) == 0)) {
- if (!value || !*value) {
- printk(KERN_WARNING "CIFS: invalid domain name\n");
- goto cifs_parse_mount_err;
- }
- /* BB are there cases in which a comma can be valid in
- a domain name and need special handling? */
- if (strnlen(value, 256) < 256) {
- vol->domainname = kstrdup(value, GFP_KERNEL);
- if (!vol->domainname) {
- printk(KERN_WARNING "CIFS: no memory "
- "for domainname\n");
- goto cifs_parse_mount_err;
- }
- cFYI(1, "Domain name set");
- } else {
- printk(KERN_WARNING "CIFS: domain name too "
- "long\n");
- goto cifs_parse_mount_err;
- }
- } else if (strnicmp(data, "srcaddr", 7) == 0) {
- vol->srcaddr.ss_family = AF_UNSPEC;
-
- if (!value || !*value) {
- printk(KERN_WARNING "CIFS: srcaddr value"
- " not specified.\n");
- goto cifs_parse_mount_err;
- }
- i = cifs_convert_address((struct sockaddr *)&vol->srcaddr,
- value, strlen(value));
- if (i == 0) {
- printk(KERN_WARNING "CIFS: Could not parse"
- " srcaddr: %s\n",
- value);
- goto cifs_parse_mount_err;
- }
- } else if (strnicmp(data, "prefixpath", 10) == 0) {
- if (!value || !*value) {
- printk(KERN_WARNING
- "CIFS: invalid path prefix\n");
- goto cifs_parse_mount_err;
- }
- if ((temp_len = strnlen(value, 1024)) < 1024) {
- if (value[0] != '/')
- temp_len++; /* missing leading slash */
- vol->prepath = kmalloc(temp_len+1, GFP_KERNEL);
- if (vol->prepath == NULL)
- goto cifs_parse_mount_err;
- if (value[0] != '/') {
- vol->prepath[0] = '/';
- strcpy(vol->prepath+1, value);
- } else
- strcpy(vol->prepath, value);
- cFYI(1, "prefix path %s", vol->prepath);
- } else {
- printk(KERN_WARNING "CIFS: prefix too long\n");
- goto cifs_parse_mount_err;
- }
- } else if (strnicmp(data, "iocharset", 9) == 0) {
- if (!value || !*value) {
- printk(KERN_WARNING "CIFS: invalid iocharset "
- "specified\n");
- goto cifs_parse_mount_err;
- }
- if (strnlen(value, 65) < 65) {
- if (strnicmp(value, "default", 7)) {
- vol->iocharset = kstrdup(value,
- GFP_KERNEL);
-
- if (!vol->iocharset) {
- printk(KERN_WARNING "CIFS: no "
- "memory for"
- "charset\n");
- goto cifs_parse_mount_err;
- }
- }
- /* if iocharset not set then load_nls_default
- is used by caller */
- cFYI(1, "iocharset set to %s", value);
- } else {
- printk(KERN_WARNING "CIFS: iocharset name "
- "too long.\n");
- goto cifs_parse_mount_err;
- }
- } else if (!strnicmp(data, "uid", 3) && value && *value) {
- vol->linux_uid = simple_strtoul(value, &value, 0);
- uid_specified = true;
- } else if (!strnicmp(data, "cruid", 5) && value && *value) {
- vol->cred_uid = simple_strtoul(value, &value, 0);
- } else if (!strnicmp(data, "forceuid", 8)) {
+ break;
+ case Opt_forceuid:
override_uid = 1;
- } else if (!strnicmp(data, "noforceuid", 10)) {
+ break;
+ case Opt_noforceuid:
override_uid = 0;
- } else if (!strnicmp(data, "gid", 3) && value && *value) {
- vol->linux_gid = simple_strtoul(value, &value, 0);
- gid_specified = true;
- } else if (!strnicmp(data, "forcegid", 8)) {
- override_gid = 1;
- } else if (!strnicmp(data, "noforcegid", 10)) {
- override_gid = 0;
- } else if (strnicmp(data, "file_mode", 4) == 0) {
- if (value && *value) {
- vol->file_mode =
- simple_strtoul(value, &value, 0);
- }
- } else if (strnicmp(data, "dir_mode", 4) == 0) {
- if (value && *value) {
- vol->dir_mode =
- simple_strtoul(value, &value, 0);
- }
- } else if (strnicmp(data, "dirmode", 4) == 0) {
- if (value && *value) {
- vol->dir_mode =
- simple_strtoul(value, &value, 0);
- }
- } else if (strnicmp(data, "port", 4) == 0) {
- if (value && *value) {
- vol->port =
- simple_strtoul(value, &value, 0);
- }
- } else if (strnicmp(data, "rsize", 5) == 0) {
- if (value && *value) {
- vol->rsize =
- simple_strtoul(value, &value, 0);
- }
- } else if (strnicmp(data, "wsize", 5) == 0) {
- if (value && *value) {
- vol->wsize =
- simple_strtoul(value, &value, 0);
- }
- } else if (strnicmp(data, "sockopt", 5) == 0) {
- if (!value || !*value) {
- cERROR(1, "no socket option specified");
- continue;
- } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
- vol->sockopt_tcp_nodelay = 1;
- }
- } else if (strnicmp(data, "netbiosname", 4) == 0) {
- if (!value || !*value || (*value == ' ')) {
- cFYI(1, "invalid (empty) netbiosname");
- } else {
- memset(vol->source_rfc1001_name, 0x20,
- RFC1001_NAME_LEN);
- /*
- * FIXME: are there cases in which a comma can
- * be valid in workstation netbios name (and
- * need special handling)?
- */
- for (i = 0; i < RFC1001_NAME_LEN; i++) {
- /* don't ucase netbiosname for user */
- if (value[i] == 0)
- break;
- vol->source_rfc1001_name[i] = value[i];
- }
- /* The string has 16th byte zero still from
- set at top of the function */
- if (i == RFC1001_NAME_LEN && value[i] != 0)
- printk(KERN_WARNING "CIFS: netbiosname"
- " longer than 15 truncated.\n");
- }
- } else if (strnicmp(data, "servern", 7) == 0) {
- /* servernetbiosname specified override *SMBSERVER */
- if (!value || !*value || (*value == ' ')) {
- cFYI(1, "empty server netbiosname specified");
- } else {
- /* last byte, type, is 0x20 for servr type */
- memset(vol->target_rfc1001_name, 0x20,
- RFC1001_NAME_LEN_WITH_NULL);
-
- for (i = 0; i < 15; i++) {
- /* BB are there cases in which a comma can be
- valid in this workstation netbios name
- (and need special handling)? */
-
- /* user or mount helper must uppercase
- the netbiosname */
- if (value[i] == 0)
- break;
- else
- vol->target_rfc1001_name[i] =
- value[i];
- }
- /* The string has 16th byte zero still from
- set at top of the function */
- if (i == RFC1001_NAME_LEN && value[i] != 0)
- printk(KERN_WARNING "CIFS: server net"
- "biosname longer than 15 truncated.\n");
- }
- } else if (strnicmp(data, "actimeo", 7) == 0) {
- if (value && *value) {
- vol->actimeo = HZ * simple_strtoul(value,
- &value, 0);
- if (vol->actimeo > CIFS_MAX_ACTIMEO) {
- cERROR(1, "CIFS: attribute cache"
- "timeout too large");
- goto cifs_parse_mount_err;
- }
- }
- } else if (strnicmp(data, "credentials", 4) == 0) {
- /* ignore */
- } else if (strnicmp(data, "version", 3) == 0) {
- /* ignore */
- } else if (strnicmp(data, "guest", 5) == 0) {
- /* ignore */
- } else if (strnicmp(data, "rw", 2) == 0 && strlen(data) == 2) {
- /* ignore */
- } else if (strnicmp(data, "ro", 2) == 0) {
- /* ignore */
- } else if (strnicmp(data, "noblocksend", 11) == 0) {
+ break;
+ case Opt_noblocksend:
vol->noblocksnd = 1;
- } else if (strnicmp(data, "noautotune", 10) == 0) {
+ break;
+ case Opt_noautotune:
vol->noautotune = 1;
- } else if ((strnicmp(data, "suid", 4) == 0) ||
- (strnicmp(data, "nosuid", 6) == 0) ||
- (strnicmp(data, "exec", 4) == 0) ||
- (strnicmp(data, "noexec", 6) == 0) ||
- (strnicmp(data, "nodev", 5) == 0) ||
- (strnicmp(data, "noauto", 6) == 0) ||
- (strnicmp(data, "dev", 3) == 0)) {
- /* The mount tool or mount.cifs helper (if present)
- uses these opts to set flags, and the flags are read
- by the kernel vfs layer before we get here (ie
- before read super) so there is no point trying to
- parse these options again and set anything and it
- is ok to just ignore them */
- continue;
- } else if (strnicmp(data, "hard", 4) == 0) {
+ break;
+ case Opt_hard:
vol->retry = 1;
- } else if (strnicmp(data, "soft", 4) == 0) {
+ break;
+ case Opt_soft:
vol->retry = 0;
- } else if (strnicmp(data, "perm", 4) == 0) {
+ break;
+ case Opt_perm:
vol->noperm = 0;
- } else if (strnicmp(data, "noperm", 6) == 0) {
+ break;
+ case Opt_noperm:
vol->noperm = 1;
- } else if (strnicmp(data, "mapchars", 8) == 0) {
+ break;
+ case Opt_mapchars:
vol->remap = 1;
- } else if (strnicmp(data, "nomapchars", 10) == 0) {
+ break;
+ case Opt_nomapchars:
vol->remap = 0;
- } else if (strnicmp(data, "sfu", 3) == 0) {
+ break;
+ case Opt_sfu:
vol->sfu_emul = 1;
- } else if (strnicmp(data, "nosfu", 5) == 0) {
+ break;
+ case Opt_nosfu:
vol->sfu_emul = 0;
- } else if (strnicmp(data, "nodfs", 5) == 0) {
+ break;
+ case Opt_nodfs:
vol->nodfs = 1;
- } else if (strnicmp(data, "posixpaths", 10) == 0) {
+ break;
+ case Opt_posixpaths:
vol->posix_paths = 1;
- } else if (strnicmp(data, "noposixpaths", 12) == 0) {
+ break;
+ case Opt_noposixpaths:
vol->posix_paths = 0;
- } else if (strnicmp(data, "nounix", 6) == 0) {
- vol->no_linux_ext = 1;
- } else if (strnicmp(data, "nolinux", 7) == 0) {
+ break;
+ case Opt_nounix:
vol->no_linux_ext = 1;
- } else if ((strnicmp(data, "nocase", 6) == 0) ||
- (strnicmp(data, "ignorecase", 10) == 0)) {
+ break;
+ case Opt_nocase:
vol->nocase = 1;
- } else if (strnicmp(data, "mand", 4) == 0) {
- /* ignore */
- } else if (strnicmp(data, "nomand", 6) == 0) {
- /* ignore */
- } else if (strnicmp(data, "_netdev", 7) == 0) {
- /* ignore */
- } else if (strnicmp(data, "brl", 3) == 0) {
+ break;
+ case Opt_brl:
vol->nobrl = 0;
- } else if ((strnicmp(data, "nobrl", 5) == 0) ||
- (strnicmp(data, "nolock", 6) == 0)) {
+ break;
+ case Opt_nobrl:
vol->nobrl = 1;
- /* turn off mandatory locking in mode
- if remote locking is turned off since the
- local vfs will do advisory */
+ /*
+ * turn off mandatory locking in mode
+ * if remote locking is turned off since the
+ * local vfs will do advisory
+ */
if (vol->file_mode ==
(S_IALLUGO & ~(S_ISUID | S_IXGRP)))
vol->file_mode = S_IALLUGO;
- } else if (strnicmp(data, "forcemandatorylock", 9) == 0) {
- /* will take the shorter form "forcemand" as well */
- /* This mount option will force use of mandatory
- (DOS/Windows style) byte range locks, instead of
- using posix advisory byte range locks, even if the
- Unix extensions are available and posix locks would
- be supported otherwise. If Unix extensions are not
- negotiated this has no effect since mandatory locks
- would be used (mandatory locks is all that those
- those servers support) */
+ break;
+ case Opt_forcemandatorylock:
vol->mand_lock = 1;
- } else if (strnicmp(data, "setuids", 7) == 0) {
+ break;
+ case Opt_setuids:
vol->setuids = 1;
- } else if (strnicmp(data, "nosetuids", 9) == 0) {
+ break;
+ case Opt_nosetuids:
vol->setuids = 0;
- } else if (strnicmp(data, "dynperm", 7) == 0) {
+ break;
+ case Opt_dynperm:
vol->dynperm = true;
- } else if (strnicmp(data, "nodynperm", 9) == 0) {
+ break;
+ case Opt_nodynperm:
vol->dynperm = false;
- } else if (strnicmp(data, "nohard", 6) == 0) {
+ break;
+ case Opt_nohard:
vol->retry = 0;
- } else if (strnicmp(data, "nosoft", 6) == 0) {
+ break;
+ case Opt_nosoft:
vol->retry = 1;
- } else if (strnicmp(data, "nointr", 6) == 0) {
+ break;
+ case Opt_nointr:
vol->intr = 0;
- } else if (strnicmp(data, "intr", 4) == 0) {
+ break;
+ case Opt_intr:
vol->intr = 1;
- } else if (strnicmp(data, "nostrictsync", 12) == 0) {
+ break;
+ case Opt_nostrictsync:
vol->nostrictsync = 1;
- } else if (strnicmp(data, "strictsync", 10) == 0) {
+ break;
+ case Opt_strictsync:
vol->nostrictsync = 0;
- } else if (strnicmp(data, "serverino", 7) == 0) {
+ break;
+ case Opt_serverino:
vol->server_ino = 1;
- } else if (strnicmp(data, "noserverino", 9) == 0) {
+ break;
+ case Opt_noserverino:
vol->server_ino = 0;
- } else if (strnicmp(data, "rwpidforward", 12) == 0) {
+ break;
+ case Opt_rwpidforward:
vol->rwpidforward = 1;
- } else if (strnicmp(data, "cifsacl", 7) == 0) {
+ break;
+ case Opt_cifsacl:
vol->cifs_acl = 1;
- } else if (strnicmp(data, "nocifsacl", 9) == 0) {
+ break;
+ case Opt_nocifsacl:
vol->cifs_acl = 0;
- } else if (strnicmp(data, "acl", 3) == 0) {
+ break;
+ case Opt_acl:
vol->no_psx_acl = 0;
- } else if (strnicmp(data, "noacl", 5) == 0) {
+ break;
+ case Opt_noacl:
vol->no_psx_acl = 1;
- } else if (strnicmp(data, "locallease", 6) == 0) {
+ break;
+ case Opt_locallease:
vol->local_lease = 1;
- } else if (strnicmp(data, "sign", 4) == 0) {
+ break;
+ case Opt_sign:
vol->secFlg |= CIFSSEC_MUST_SIGN;
- } else if (strnicmp(data, "seal", 4) == 0) {
+ break;
+ case Opt_seal:
/* we do not do the following in secFlags because seal
- is a per tree connection (mount) not a per socket
- or per-smb connection option in the protocol */
- /* vol->secFlg |= CIFSSEC_MUST_SEAL; */
+ * is a per tree connection (mount) not a per socket
+ * or per-smb connection option in the protocol
+ * vol->secFlg |= CIFSSEC_MUST_SEAL;
+ */
vol->seal = 1;
- } else if (strnicmp(data, "direct", 6) == 0) {
- vol->direct_io = 1;
- } else if (strnicmp(data, "forcedirectio", 13) == 0) {
+ break;
+ case Opt_direct:
vol->direct_io = 1;
- } else if (strnicmp(data, "strictcache", 11) == 0) {
+ break;
+ case Opt_strictcache:
vol->strict_io = 1;
- } else if (strnicmp(data, "noac", 4) == 0) {
+ break;
+ case Opt_noac:
printk(KERN_WARNING "CIFS: Mount option noac not "
"supported. Instead set "
"/proc/fs/cifs/LookupCacheEnabled to 0\n");
- } else if (strnicmp(data, "fsc", 3) == 0) {
+ break;
+ case Opt_fsc:
#ifndef CONFIG_CIFS_FSCACHE
cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE "
"kernel config option set");
goto cifs_parse_mount_err;
#endif
vol->fsc = true;
- } else if (strnicmp(data, "mfsymlinks", 10) == 0) {
+ break;
+ case Opt_mfsymlinks:
vol->mfsymlinks = true;
- } else if (strnicmp(data, "multiuser", 8) == 0) {
+ break;
+ case Opt_multiuser:
vol->multiuser = true;
- } else if (!strnicmp(data, "backupuid", 9) && value && *value) {
- err = kstrtouint(value, 0, &vol->backupuid);
- if (err < 0) {
+ break;
+ case Opt_sloppy:
+ sloppy = true;
+ break;
+
+ /* Numeric Values */
+ case Opt_backupuid:
+ if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid backupuid value",
__func__);
goto cifs_parse_mount_err;
}
+ vol->backupuid = option;
vol->backupuid_specified = true;
- } else if (!strnicmp(data, "backupgid", 9) && value && *value) {
- err = kstrtouint(value, 0, &vol->backupgid);
- if (err < 0) {
+ break;
+ case Opt_backupgid:
+ if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid backupgid value",
__func__);
goto cifs_parse_mount_err;
}
+ vol->backupgid = option;
vol->backupgid_specified = true;
- } else
- printk(KERN_WARNING "CIFS: Unknown mount option %s\n",
- data);
- }
- if (vol->UNC == NULL) {
- if (devname == NULL) {
- printk(KERN_WARNING "CIFS: Missing UNC name for mount "
- "target\n");
- goto cifs_parse_mount_err;
- }
- if ((temp_len = strnlen(devname, 300)) < 300) {
- vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
- if (vol->UNC == NULL)
+ break;
+ case Opt_uid:
+ if (get_option_ul(args, &option)) {
+ cERROR(1, "%s: Invalid uid value",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ vol->linux_uid = option;
+ uid_specified = true;
+ break;
+ case Opt_cruid:
+ if (get_option_ul(args, &option)) {
+ cERROR(1, "%s: Invalid cruid value",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ vol->cred_uid = option;
+ break;
+ case Opt_gid:
+ if (get_option_ul(args, &option)) {
+ cERROR(1, "%s: Invalid gid value",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ vol->linux_gid = option;
+ gid_specified = true;
+ break;
+ case Opt_file_mode:
+ if (get_option_ul(args, &option)) {
+ cERROR(1, "%s: Invalid file_mode value",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ vol->file_mode = option;
+ break;
+ case Opt_dirmode:
+ if (get_option_ul(args, &option)) {
+ cERROR(1, "%s: Invalid dir_mode value",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ vol->dir_mode = option;
+ break;
+ case Opt_port:
+ if (get_option_ul(args, &option)) {
+ cERROR(1, "%s: Invalid port value",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ vol->port = option;
+ break;
+ case Opt_rsize:
+ if (get_option_ul(args, &option)) {
+ cERROR(1, "%s: Invalid rsize value",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ vol->rsize = option;
+ break;
+ case Opt_wsize:
+ if (get_option_ul(args, &option)) {
+ cERROR(1, "%s: Invalid wsize value",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ vol->wsize = option;
+ break;
+ case Opt_actimeo:
+ if (get_option_ul(args, &option)) {
+ cERROR(1, "%s: Invalid actimeo value",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ vol->actimeo = HZ * option;
+ if (vol->actimeo > CIFS_MAX_ACTIMEO) {
+ cERROR(1, "CIFS: attribute cache"
+ "timeout too large");
+ goto cifs_parse_mount_err;
+ }
+ break;
+
+ /* String Arguments */
+
+ case Opt_user:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (!*string) {
+ /* null user, i.e. anonymous authentication */
+ vol->nullauth = 1;
+ } else if (strnlen(string, MAX_USERNAME_SIZE) >
+ MAX_USERNAME_SIZE) {
+ printk(KERN_WARNING "CIFS: username too long\n");
+ goto cifs_parse_mount_err;
+ }
+ vol->username = kstrdup(string, GFP_KERNEL);
+ if (!vol->username) {
+ printk(KERN_WARNING "CIFS: no memory "
+ "for username\n");
+ goto cifs_parse_mount_err;
+ }
+ break;
+ case Opt_blank_pass:
+ vol->password = NULL;
+ break;
+ case Opt_pass:
+ /* passwords have to be handled differently
+ * to allow the character used as the delimiter
+ * to be passed within them
+ */
+
+ /* Obtain the value string */
+ value = strchr(data, '=');
+ if (value != NULL)
+ *value++ = '\0';
+
+ /* Set tmp_end to end of the string */
+ tmp_end = (char *) value + strlen(value);
+
+ /* Check if the following character is the delimiter.
+ * If yes, we have encountered a double delimiter;
+ * reset the null character back to the delimiter
+ */
+ if (tmp_end < end && tmp_end[1] == delim)
+ tmp_end[0] = delim;
+
+ /* Keep iterating until we get to a single delimiter
+ * OR the end
+ */
+ while ((tmp_end = strchr(tmp_end, delim)) != NULL &&
+ (tmp_end[1] == delim)) {
+ tmp_end = (char *) &tmp_end[2];
+ }
+
+ /* Reset var options to point to next element */
+ if (tmp_end) {
+ tmp_end[0] = '\0';
+ options = (char *) &tmp_end[1];
+ } else
+ /* Reached the end of the mount option string */
+ options = end;
+
+ /* Now build new password string */
+ temp_len = strlen(value);
+ vol->password = kzalloc(temp_len+1, GFP_KERNEL);
+ if (vol->password == NULL) {
+ printk(KERN_WARNING "CIFS: no memory "
+ "for password\n");
goto cifs_parse_mount_err;
- strcpy(vol->UNC, devname);
- if (strncmp(vol->UNC, "//", 2) == 0) {
+ }
+
+ for (i = 0, j = 0; i < temp_len; i++, j++) {
+ vol->password[j] = value[i];
+ if ((value[i] == delim) &&
+ value[i+1] == delim)
+ /* skip the second delimiter */
+ i++;
+ }
+ vol->password[j] = '\0';
+ break;
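The Opt_pass case above keeps the legacy rule that a delimiter typed twice inside the password stands for one literal delimiter. A standalone sketch of that collapse step; the kernel loop does the same thing in place on "value":

#include <string.h>
#include <assert.h>

/* Illustrative only: copy 'src' to 'dst', collapsing a doubled delimiter
 * (e.g. ",,") into a single literal character, as the password loop does. */
static void unescape_delim(char *dst, const char *src, char delim)
{
        size_t i, j;

        for (i = 0, j = 0; src[i] != '\0'; i++, j++) {
                dst[j] = src[i];
                if (src[i] == delim && src[i + 1] == delim)
                        i++;    /* skip the second delimiter */
        }
        dst[j] = '\0';
}

int main(void)
{
        char buf[32];

        unescape_delim(buf, "pa,,ss", ',');
        assert(strcmp(buf, "pa,ss") == 0);
        return 0;
}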
+ case Opt_ip:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (!*string) {
+ vol->UNCip = NULL;
+ } else if (strnlen(string, INET6_ADDRSTRLEN) >
+ INET6_ADDRSTRLEN) {
+ printk(KERN_WARNING "CIFS: ip address "
+ "too long\n");
+ goto cifs_parse_mount_err;
+ }
+ vol->UNCip = kstrdup(string, GFP_KERNEL);
+ if (!vol->UNCip) {
+ printk(KERN_WARNING "CIFS: no memory "
+ "for UNC IP\n");
+ goto cifs_parse_mount_err;
+ }
+ break;
+ case Opt_unc:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (!*string) {
+ printk(KERN_WARNING "CIFS: invalid path to "
+ "network resource\n");
+ goto cifs_parse_mount_err;
+ }
+
+ temp_len = strnlen(string, 300);
+ if (temp_len == 300) {
+ printk(KERN_WARNING "CIFS: UNC name too long\n");
+ goto cifs_parse_mount_err;
+ }
+
+ if (strncmp(string, "//", 2) == 0) {
vol->UNC[0] = '\\';
vol->UNC[1] = '\\';
- } else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
+ } else if (strncmp(string, "\\\\", 2) != 0) {
printk(KERN_WARNING "CIFS: UNC Path does not "
- "begin with // or \\\\ \n");
+ "begin with // or \\\\\n");
goto cifs_parse_mount_err;
}
- value = strpbrk(vol->UNC+2, "/\\");
- if (value)
- *value = '\\';
- } else {
- printk(KERN_WARNING "CIFS: UNC name too long\n");
+
+ vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
+ if (vol->UNC == NULL) {
+ printk(KERN_WARNING "CIFS: no memory "
+ "for UNC\n");
+ goto cifs_parse_mount_err;
+ }
+ strcpy(vol->UNC, string);
+ break;
+ case Opt_domain:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (!*string) {
+ printk(KERN_WARNING "CIFS: invalid domain"
+ " name\n");
+ goto cifs_parse_mount_err;
+ } else if (strnlen(string, 256) == 256) {
+ printk(KERN_WARNING "CIFS: domain name too"
+ " long\n");
+ goto cifs_parse_mount_err;
+ }
+
+ vol->domainname = kstrdup(string, GFP_KERNEL);
+ if (!vol->domainname) {
+ printk(KERN_WARNING "CIFS: no memory "
+ "for domainname\n");
+ goto cifs_parse_mount_err;
+ }
+ cFYI(1, "Domain name set");
+ break;
+ case Opt_srcaddr:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (!*string) {
+ printk(KERN_WARNING "CIFS: srcaddr value not"
+ " specified\n");
+ goto cifs_parse_mount_err;
+ } else if (!cifs_convert_address(
+ (struct sockaddr *)&vol->srcaddr,
+ string, strlen(string))) {
+ printk(KERN_WARNING "CIFS: Could not parse"
+ " srcaddr: %s\n", string);
+ goto cifs_parse_mount_err;
+ }
+ break;
+ case Opt_prefixpath:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (!*string) {
+ printk(KERN_WARNING "CIFS: Invalid path"
+ " prefix\n");
+ goto cifs_parse_mount_err;
+ }
+ temp_len = strnlen(string, 1024);
+ if (string[0] != '/')
+ temp_len++; /* missing leading slash */
+ if (temp_len > 1024) {
+ printk(KERN_WARNING "CIFS: prefix too long\n");
+ goto cifs_parse_mount_err;
+ }
+
+ vol->prepath = kmalloc(temp_len+1, GFP_KERNEL);
+ if (vol->prepath == NULL) {
+ printk(KERN_WARNING "CIFS: no memory "
+ "for path prefix\n");
+ goto cifs_parse_mount_err;
+ }
+
+ if (string[0] != '/') {
+ vol->prepath[0] = '/';
+ strcpy(vol->prepath+1, string);
+ } else
+ strcpy(vol->prepath, string);
+
+ break;
+ case Opt_iocharset:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (!*string) {
+ printk(KERN_WARNING "CIFS: Invalid iocharset"
+ " specified\n");
+ goto cifs_parse_mount_err;
+ } else if (strnlen(string, 1024) >= 65) {
+ printk(KERN_WARNING "CIFS: iocharset name "
+ "too long.\n");
+ goto cifs_parse_mount_err;
+ }
+
+ if (strnicmp(string, "default", 7) != 0) {
+ vol->iocharset = kstrdup(string,
+ GFP_KERNEL);
+ if (!vol->iocharset) {
+ printk(KERN_WARNING "CIFS: no memory"
+ "for charset\n");
+ goto cifs_parse_mount_err;
+ }
+ }
+ /* if iocharset not set then load_nls_default
+ * is used by caller
+ */
+ cFYI(1, "iocharset set to %s", string);
+ break;
+ case Opt_sockopt:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (!*string) {
+ printk(KERN_WARNING "CIFS: No socket option"
+ " specified\n");
+ goto cifs_parse_mount_err;
+ }
+ if (strnicmp(string, "TCP_NODELAY", 11) == 0)
+ vol->sockopt_tcp_nodelay = 1;
+ break;
+ case Opt_netbiosname:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (!*string) {
+ printk(KERN_WARNING "CIFS: Invalid (empty)"
+ " netbiosname\n");
+ break;
+ }
+
+ memset(vol->source_rfc1001_name, 0x20,
+ RFC1001_NAME_LEN);
+ /*
+ * FIXME: are there cases in which a comma can
+ * be valid in workstation netbios name (and
+ * need special handling)?
+ */
+ for (i = 0; i < RFC1001_NAME_LEN; i++) {
+ /* don't ucase netbiosname for user */
+ if (string[i] == 0)
+ break;
+ vol->source_rfc1001_name[i] = string[i];
+ }
+ /* The 16th byte of the string is still zero from
+ * the memset above
+ */
+ if (i == RFC1001_NAME_LEN && string[i] != 0)
+ printk(KERN_WARNING "CIFS: netbiosname"
+ " longer than 15 truncated.\n");
+
+ break;
+ case Opt_servern:
+ /* servernetbiosname specified override *SMBSERVER */
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (!*string) {
+ printk(KERN_WARNING "CIFS: Empty server"
+ " netbiosname specified\n");
+ break;
+ }
+ /* last byte, type, is 0x20 for server type */
+ memset(vol->target_rfc1001_name, 0x20,
+ RFC1001_NAME_LEN_WITH_NULL);
+
+ /* BB are there cases in which a comma can be
+ valid in this workstation netbios name
+ (and need special handling)? */
+
+ /* user or mount helper must uppercase the
+ netbios name */
+ for (i = 0; i < 15; i++) {
+ if (string[i] == 0)
+ break;
+ vol->target_rfc1001_name[i] = string[i];
+ }
+ /* The string's 16th byte is still zero from
+ being set at the top of the function */
+ if (i == RFC1001_NAME_LEN && string[i] != 0)
+ printk(KERN_WARNING "CIFS: server net"
+ "biosname longer than 15 truncated.\n");
+ break;
+ case Opt_ver:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (!*string) {
+ cERROR(1, "no protocol version specified"
+ " after vers= mount option");
+ goto cifs_parse_mount_err;
+ }
+
+ if (strnicmp(string, "cifs", 4) == 0 ||
+ strnicmp(string, "1", 1) == 0) {
+ /* This is the default */
+ break;
+ }
+ /* For all other values, error out */
+ printk(KERN_WARNING "CIFS: Invalid version"
+ " specified\n");
goto cifs_parse_mount_err;
+ case Opt_sec:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (!*string) {
+ printk(KERN_WARNING "CIFS: no security flavor"
+ " specified\n");
+ break;
+ }
+
+ if (cifs_parse_security_flavors(string, vol) != 0)
+ goto cifs_parse_mount_err;
+ break;
+ default:
+ /*
+ * An option we don't recognize. Save it off for later
+ * if we haven't already found one
+ */
+ if (!invalid)
+ invalid = data;
+ break;
}
+ /* Free up any allocated string */
+ kfree(string);
+ string = NULL;
+ }
+
+ if (!sloppy && invalid) {
+ printk(KERN_ERR "CIFS: Unknown mount option \"%s\"\n", invalid);
+ goto cifs_parse_mount_err;
}
#ifndef CONFIG_KEYS
@@ -1625,7 +1924,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
kfree(mountdata_copy);
return 0;
+out_nomem:
+ printk(KERN_WARNING "Could not allocate temporary buffer\n");
cifs_parse_mount_err:
+ kfree(string);
kfree(mountdata_copy);
return 1;
}
@@ -1977,7 +2279,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
cifs_fscache_get_client_cookie(tcp_ses);
/* queue echo request delayed work */
- queue_delayed_work(system_nrt_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
+ queue_delayed_work(cifsiod_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
return tcp_ses;
@@ -3543,7 +3845,7 @@ remote_path_check:
tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
- queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
+ queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
TLINK_IDLE_EXPIRE);
mount_fail_check:
@@ -4097,6 +4399,6 @@ cifs_prune_tlinks(struct work_struct *work)
}
spin_unlock(&cifs_sb->tlink_tree_lock);
- queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
+ queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
TLINK_IDLE_EXPIRE);
}
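
The option cases above all follow the same shape: duplicate the token with match_strdup(), reject an empty value, copy what is needed into the volume structure, and free the temporary string at the bottom of every loop iteration, with out_nomem and cifs_parse_mount_err as the two failure exits. A minimal user-space sketch of that pattern; the names parse_one and vol_info are illustrative and not part of the patch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct vol_info {
        char *domainname;
    };

    /* Per-option pattern from the parser: dup, validate, consume, free. */
    static int parse_one(struct vol_info *vol, const char *key, const char *val)
    {
        char *string = strdup(val);        /* stands in for match_strdup() */

        if (string == NULL)
            return -1;                     /* out_nomem path */

        if (!*string) {
            fprintf(stderr, "CIFS: %s value not specified\n", key);
            free(string);
            return -1;                     /* cifs_parse_mount_err path */
        }

        if (strcmp(key, "domain") == 0) {
            vol->domainname = strdup(string);
            if (vol->domainname == NULL) {
                free(string);
                return -1;                 /* "no memory for domainname" */
            }
        }

        free(string);                      /* freed at the end of every case */
        return 0;
    }

    int main(void)
    {
        struct vol_info vol = { 0 };

        if (parse_one(&vol, "domain", "EXAMPLE") == 0)
            printf("domain=%s\n", vol.domainname);
        free(vol.domainname);
        return 0;
    }
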
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 159fcc56dc2d..460d87b7cda0 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1399,7 +1399,10 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
return rc;
}
-/* update the file size (if needed) after a write */
+/*
+ * update the file size (if needed) after a write. Should be called with
+ * the inode->i_lock held
+ */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written)
@@ -1471,7 +1474,9 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
return rc;
}
} else {
+ spin_lock(&dentry->d_inode->i_lock);
cifs_update_eof(cifsi, *poffset, bytes_written);
+ spin_unlock(&dentry->d_inode->i_lock);
*poffset += bytes_written;
}
}
@@ -1648,6 +1653,27 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
return rc;
}
+/*
+ * Marshal up the iov array, reserving the first one for the header. Also,
+ * set wdata->bytes.
+ */
+static void
+cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
+{
+ int i;
+ struct inode *inode = wdata->cfile->dentry->d_inode;
+ loff_t size = i_size_read(inode);
+
+ /* marshal up the pages into iov array */
+ wdata->bytes = 0;
+ for (i = 0; i < wdata->nr_pages; i++) {
+ iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
+ (loff_t)PAGE_CACHE_SIZE);
+ iov[i + 1].iov_base = kmap(wdata->pages[i]);
+ wdata->bytes += iov[i + 1].iov_len;
+ }
+}
+
static int cifs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
@@ -1684,7 +1710,8 @@ retry:
tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
end - index) + 1;
- wdata = cifs_writedata_alloc((unsigned int)tofind);
+ wdata = cifs_writedata_alloc((unsigned int)tofind,
+ cifs_writev_complete);
if (!wdata) {
rc = -ENOMEM;
break;
@@ -1791,6 +1818,7 @@ retry:
wdata->sync_mode = wbc->sync_mode;
wdata->nr_pages = nr_pages;
wdata->offset = page_offset(wdata->pages[0]);
+ wdata->marshal_iov = cifs_writepages_marshal_iov;
do {
if (wdata->cfile != NULL)
@@ -1802,6 +1830,7 @@ retry:
rc = -EBADF;
break;
}
+ wdata->pid = wdata->cfile->pid;
rc = cifs_async_writev(wdata);
} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
@@ -2043,7 +2072,7 @@ cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
unsigned long i;
for (i = 0; i < num_pages; i++) {
- pages[i] = alloc_page(__GFP_HIGHMEM);
+ pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
if (!pages[i]) {
/*
* save number of pages we have already allocated and
@@ -2051,15 +2080,14 @@ cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
*/
num_pages = i;
rc = -ENOMEM;
- goto error;
+ break;
}
}
- return rc;
-
-error:
- for (i = 0; i < num_pages; i++)
- put_page(pages[i]);
+ if (rc) {
+ for (i = 0; i < num_pages; i++)
+ put_page(pages[i]);
+ }
return rc;
}
@@ -2070,9 +2098,7 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
size_t clen;
clen = min_t(const size_t, len, wsize);
- num_pages = clen / PAGE_CACHE_SIZE;
- if (clen % PAGE_CACHE_SIZE)
- num_pages++;
+ num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
if (cur_len)
*cur_len = clen;
@@ -2080,24 +2106,79 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
return num_pages;
}
+static void
+cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
+{
+ int i;
+ size_t bytes = wdata->bytes;
+
+ /* marshal up the pages into iov array */
+ for (i = 0; i < wdata->nr_pages; i++) {
+ iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
+ iov[i + 1].iov_base = kmap(wdata->pages[i]);
+ bytes -= iov[i + 1].iov_len;
+ }
+}
+
+static void
+cifs_uncached_writev_complete(struct work_struct *work)
+{
+ int i;
+ struct cifs_writedata *wdata = container_of(work,
+ struct cifs_writedata, work);
+ struct inode *inode = wdata->cfile->dentry->d_inode;
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+
+ spin_lock(&inode->i_lock);
+ cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
+ if (cifsi->server_eof > inode->i_size)
+ i_size_write(inode, cifsi->server_eof);
+ spin_unlock(&inode->i_lock);
+
+ complete(&wdata->done);
+
+ if (wdata->result != -EAGAIN) {
+ for (i = 0; i < wdata->nr_pages; i++)
+ put_page(wdata->pages[i]);
+ }
+
+ kref_put(&wdata->refcount, cifs_writedata_release);
+}
+
+/* attempt to send write to server, retry on any -EAGAIN errors */
+static int
+cifs_uncached_retry_writev(struct cifs_writedata *wdata)
+{
+ int rc;
+
+ do {
+ if (wdata->cfile->invalidHandle) {
+ rc = cifs_reopen_file(wdata->cfile, false);
+ if (rc != 0)
+ continue;
+ }
+ rc = cifs_async_writev(wdata);
+ } while (rc == -EAGAIN);
+
+ return rc;
+}
+
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *poffset)
{
- unsigned int written;
- unsigned long num_pages, npages, i;
+ unsigned long nr_pages, i;
size_t copied, len, cur_len;
ssize_t total_written = 0;
- struct kvec *to_send;
- struct page **pages;
+ loff_t offset = *poffset;
struct iov_iter it;
- struct inode *inode;
struct cifsFileInfo *open_file;
- struct cifs_tcon *pTcon;
+ struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
- struct cifs_io_parms io_parms;
- int xid, rc;
- __u32 pid;
+ struct cifs_writedata *wdata, *tmp;
+ struct list_head wdata_list;
+ int rc;
+ pid_t pid;
len = iov_length(iov, nr_segs);
if (!len)
@@ -2107,103 +2188,103 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
if (rc)
return rc;
+ INIT_LIST_HEAD(&wdata_list);
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
-
- pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
- if (!to_send) {
- kfree(pages);
- return -ENOMEM;
- }
-
- rc = cifs_write_allocate_pages(pages, num_pages);
- if (rc) {
- kfree(pages);
- kfree(to_send);
- return rc;
- }
-
- xid = GetXid();
open_file = file->private_data;
+ tcon = tlink_tcon(open_file->tlink);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
else
pid = current->tgid;
- pTcon = tlink_tcon(open_file->tlink);
- inode = file->f_path.dentry->d_inode;
-
iov_iter_init(&it, iov, nr_segs, len, 0);
- npages = num_pages;
-
do {
- size_t save_len = cur_len;
- for (i = 0; i < npages; i++) {
- copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
- copied = iov_iter_copy_from_user(pages[i], &it, 0,
- copied);
+ size_t save_len;
+
+ nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
+ wdata = cifs_writedata_alloc(nr_pages,
+ cifs_uncached_writev_complete);
+ if (!wdata) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
+ if (rc) {
+ kfree(wdata);
+ break;
+ }
+
+ save_len = cur_len;
+ for (i = 0; i < nr_pages; i++) {
+ copied = min_t(const size_t, cur_len, PAGE_SIZE);
+ copied = iov_iter_copy_from_user(wdata->pages[i], &it,
+ 0, copied);
cur_len -= copied;
iov_iter_advance(&it, copied);
- to_send[i+1].iov_base = kmap(pages[i]);
- to_send[i+1].iov_len = copied;
}
-
cur_len = save_len - cur_len;
- do {
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, false);
- if (rc != 0)
- break;
- }
- io_parms.netfid = open_file->netfid;
- io_parms.pid = pid;
- io_parms.tcon = pTcon;
- io_parms.offset = *poffset;
- io_parms.length = cur_len;
- rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
- npages, 0);
- } while (rc == -EAGAIN);
-
- for (i = 0; i < npages; i++)
- kunmap(pages[i]);
-
- if (written) {
- len -= written;
- total_written += written;
- cifs_update_eof(CIFS_I(inode), *poffset, written);
- *poffset += written;
- } else if (rc < 0) {
- if (!total_written)
- total_written = rc;
+ wdata->sync_mode = WB_SYNC_ALL;
+ wdata->nr_pages = nr_pages;
+ wdata->offset = (__u64)offset;
+ wdata->cfile = cifsFileInfo_get(open_file);
+ wdata->pid = pid;
+ wdata->bytes = cur_len;
+ wdata->marshal_iov = cifs_uncached_marshal_iov;
+ rc = cifs_uncached_retry_writev(wdata);
+ if (rc) {
+ kref_put(&wdata->refcount, cifs_writedata_release);
break;
}
- /* get length and number of kvecs of the next write */
- npages = get_numpages(cifs_sb->wsize, len, &cur_len);
+ list_add_tail(&wdata->list, &wdata_list);
+ offset += cur_len;
+ len -= cur_len;
} while (len > 0);
- if (total_written > 0) {
- spin_lock(&inode->i_lock);
- if (*poffset > inode->i_size)
- i_size_write(inode, *poffset);
- spin_unlock(&inode->i_lock);
+ /*
+ * If at least one write was successfully sent, then discard any rc
+ * value from the later writes. If the other write succeeds, then
+ * we'll end up returning whatever was written. If it fails, then
+ * we'll get a new rc value from that.
+ */
+ if (!list_empty(&wdata_list))
+ rc = 0;
+
+ /*
+ * Wait for and collect replies for any successful sends in order of
+ * increasing offset. Once an error is hit or we get a fatal signal
+ * while waiting, then return without waiting for any more replies.
+ */
+restart_loop:
+ list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
+ if (!rc) {
+ /* FIXME: freezable too? */
+ rc = wait_for_completion_killable(&wdata->done);
+ if (rc)
+ rc = -EINTR;
+ else if (wdata->result)
+ rc = wdata->result;
+ else
+ total_written += wdata->bytes;
+
+ /* resend call if it's a retryable error */
+ if (rc == -EAGAIN) {
+ rc = cifs_uncached_retry_writev(wdata);
+ goto restart_loop;
+ }
+ }
+ list_del_init(&wdata->list);
+ kref_put(&wdata->refcount, cifs_writedata_release);
}
- cifs_stats_bytes_written(pTcon, total_written);
- mark_inode_dirty_sync(inode);
+ if (total_written > 0)
+ *poffset += total_written;
- for (i = 0; i < num_pages; i++)
- put_page(pages[i]);
- kfree(to_send);
- kfree(pages);
- FreeXid(xid);
- return total_written;
+ cifs_stats_bytes_written(tcon, total_written);
+ return total_written ? total_written : (ssize_t)rc;
}
ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
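
The rewritten cifs_iovec_write() above no longer issues one synchronous write per chunk; it slices the request into wsize-sized pieces, allocates a cifs_writedata per piece with DIV_ROUND_UP(clen, PAGE_SIZE) pages, queues each for asynchronous sending, and then collects the completions in offset order. A standalone sketch of just the chunking arithmetic; the async send and completion wait are elided and the numbers are made up:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Model of get_numpages(): one chunk is at most wsize bytes. */
    static unsigned long numpages(unsigned long wsize, unsigned long len,
                                  unsigned long *cur_len)
    {
        unsigned long clen = len < wsize ? len : wsize;

        *cur_len = clen;
        return DIV_ROUND_UP(clen, PAGE_SIZE);
    }

    int main(void)
    {
        unsigned long wsize = 65536, len = 200000, offset = 0, cur_len;

        /* Each pass models one cifs_writedata chunk queued for async send. */
        while (len > 0) {
            unsigned long np = numpages(wsize, len, &cur_len);

            printf("chunk at %lu: %lu bytes in %lu pages\n",
                   offset, cur_len, np);
            offset += cur_len;
            len -= cur_len;
        }
        return 0;
    }
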
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c273c12de98e..c29d1aa2c54f 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -213,55 +213,62 @@ cifs_small_buf_release(void *buf_to_free)
}
/*
- Find a free multiplex id (SMB mid). Otherwise there could be
- mid collisions which might cause problems, demultiplexing the
- wrong response to this request. Multiplex ids could collide if
- one of a series requests takes much longer than the others, or
- if a very large number of long lived requests (byte range
- locks or FindNotify requests) are pending. No more than
- 64K-1 requests can be outstanding at one time. If no
- mids are available, return zero. A future optimization
- could make the combination of mids and uid the key we use
- to demultiplex on (rather than mid alone).
- In addition to the above check, the cifs demultiplex
- code already used the command code as a secondary
- check of the frame and if signing is negotiated the
- response would be discarded if the mid were the same
- but the signature was wrong. Since the mid is not put in the
- pending queue until later (when it is about to be dispatched)
- we do have to limit the number of outstanding requests
- to somewhat less than 64K-1 although it is hard to imagine
- so many threads being in the vfs at one time.
-*/
-__u16 GetNextMid(struct TCP_Server_Info *server)
+ * Find a free multiplex id (SMB mid). Otherwise there could be
+ * mid collisions which might cause problems, demultiplexing the
+ * wrong response to this request. Multiplex ids could collide if
+ * one of a series requests takes much longer than the others, or
+ * if a very large number of long lived requests (byte range
+ * locks or FindNotify requests) are pending. No more than
+ * 64K-1 requests can be outstanding at one time. If no
+ * mids are available, return zero. A future optimization
+ * could make the combination of mids and uid the key we use
+ * to demultiplex on (rather than mid alone).
+ * In addition to the above check, the cifs demultiplex
+ * code already used the command code as a secondary
+ * check of the frame and if signing is negotiated the
+ * response would be discarded if the mid were the same
+ * but the signature was wrong. Since the mid is not put in the
+ * pending queue until later (when it is about to be dispatched)
+ * we do have to limit the number of outstanding requests
+ * to somewhat less than 64K-1 although it is hard to imagine
+ * so many threads being in the vfs at one time.
+ */
+__u64 GetNextMid(struct TCP_Server_Info *server)
{
- __u16 mid = 0;
- __u16 last_mid;
+ __u64 mid = 0;
+ __u16 last_mid, cur_mid;
bool collision;
spin_lock(&GlobalMid_Lock);
- last_mid = server->CurrentMid; /* we do not want to loop forever */
- server->CurrentMid++;
- /* This nested loop looks more expensive than it is.
- In practice the list of pending requests is short,
- fewer than 50, and the mids are likely to be unique
- on the first pass through the loop unless some request
- takes longer than the 64 thousand requests before it
- (and it would also have to have been a request that
- did not time out) */
- while (server->CurrentMid != last_mid) {
+
+ /* mid is 16 bit only for CIFS/SMB */
+ cur_mid = (__u16)((server->CurrentMid) & 0xffff);
+ /* we do not want to loop forever */
+ last_mid = cur_mid;
+ cur_mid++;
+
+ /*
+ * This nested loop looks more expensive than it is.
+ * In practice the list of pending requests is short,
+ * fewer than 50, and the mids are likely to be unique
+ * on the first pass through the loop unless some request
+ * takes longer than the 64 thousand requests before it
+ * (and it would also have to have been a request that
+ * did not time out).
+ */
+ while (cur_mid != last_mid) {
struct mid_q_entry *mid_entry;
unsigned int num_mids;
collision = false;
- if (server->CurrentMid == 0)
- server->CurrentMid++;
+ if (cur_mid == 0)
+ cur_mid++;
num_mids = 0;
list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
++num_mids;
- if (mid_entry->mid == server->CurrentMid &&
- mid_entry->midState == MID_REQUEST_SUBMITTED) {
+ if (mid_entry->mid == cur_mid &&
+ mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
/* This mid is in use, try a different one */
collision = true;
break;
@@ -282,10 +289,11 @@ __u16 GetNextMid(struct TCP_Server_Info *server)
server->tcpStatus = CifsNeedReconnect;
if (!collision) {
- mid = server->CurrentMid;
+ mid = (__u64)cur_mid;
+ server->CurrentMid = mid;
break;
}
- server->CurrentMid++;
+ cur_mid++;
}
spin_unlock(&GlobalMid_Lock);
return mid;
@@ -420,8 +428,10 @@ check_smb_hdr(struct smb_hdr *smb, __u16 mid)
}
int
-checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int total_read)
+checkSMB(char *buf, unsigned int total_read)
{
+ struct smb_hdr *smb = (struct smb_hdr *)buf;
+ __u16 mid = smb->Mid;
__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
__u32 clc_len; /* calculated length */
cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x",
@@ -502,8 +512,9 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int total_read)
}
bool
-is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
+is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
+ struct smb_hdr *buf = (struct smb_hdr *)buffer;
struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
struct list_head *tmp, *tmp1, *tmp2;
struct cifs_ses *ses;
@@ -584,7 +595,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
cifs_set_oplock_level(pCifsInode,
pSMB->OplockLevel ? OPLOCK_READ : 0);
- queue_work(system_nrt_wq,
+ queue_work(cifsiod_wq,
&netfile->oplock_break);
netfile->oplock_break_cancelled = false;
@@ -604,16 +615,15 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
}
void
-dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
+dump_smb(void *buf, int smb_buf_length)
{
int i, j;
char debug_line[17];
- unsigned char *buffer;
+ unsigned char *buffer = buf;
if (traceSMB == 0)
return;
- buffer = (unsigned char *) smb_buf;
for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
if (i % 8 == 0) {
/* have reached the beginning of line */
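
GetNextMid() now hands back a 64-bit mid to the generic code while still cycling through the 16-bit CIFS mid space: it starts one past the current value, skips zero, and rejects any candidate that is still outstanding on pending_mid_q. A simplified user-space model of that search; the per-entry MID_REQUEST_SUBMITTED state check is omitted:

    #include <stdio.h>
    #include <stdint.h>

    /* Model of the 16-bit mid search: skip 0 and any in-flight mid. */
    static uint64_t next_mid(uint64_t current, const uint16_t *pending, int n)
    {
        uint16_t last = (uint16_t)(current & 0xffff);
        uint16_t cur = last + 1;        /* wraps naturally at 16 bits */

        while (cur != last) {
            int collision = 0, i;

            if (cur == 0)
                cur++;
            for (i = 0; i < n; i++)
                if (pending[i] == cur)
                    collision = 1;
            if (!collision)
                return (uint64_t)cur;
            cur++;
        }
        return 0;                       /* no free mid available */
    }

    int main(void)
    {
        uint16_t pending[3] = { 5, 6, 8 };

        printf("next mid after 4: %llu\n",
               (unsigned long long)next_mid(4, pending, 3));
        return 0;
    }
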
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 73e47e84b61a..dd23a321bdda 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -836,8 +836,9 @@ ntstatus_to_dos(__u32 ntstatus, __u8 *eclass, __u16 *ecode)
}
int
-map_smb_to_linux_error(struct smb_hdr *smb, bool logErr)
+map_smb_to_linux_error(char *buf, bool logErr)
{
+ struct smb_hdr *smb = (struct smb_hdr *)buf;
unsigned int i;
int rc = -EIO; /* if transport error smb error may not be set */
__u8 smberrclass;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 310918b6fcb4..0961336513d5 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -60,8 +60,8 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
memset(temp, 0, sizeof(struct mid_q_entry));
temp->mid = smb_buffer->Mid; /* always LE */
temp->pid = current->pid;
- temp->command = smb_buffer->Command;
- cFYI(1, "For smb_command %d", temp->command);
+ temp->command = cpu_to_le16(smb_buffer->Command);
+ cFYI(1, "For smb_command %d", smb_buffer->Command);
/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
/* when mid allocated can be before when sent */
temp->when_alloc = jiffies;
@@ -75,7 +75,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
}
atomic_inc(&midCount);
- temp->midState = MID_REQUEST_ALLOCATED;
+ temp->mid_state = MID_REQUEST_ALLOCATED;
return temp;
}
@@ -85,9 +85,9 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
#ifdef CONFIG_CIFS_STATS2
unsigned long now;
#endif
- midEntry->midState = MID_FREE;
+ midEntry->mid_state = MID_FREE;
atomic_dec(&midCount);
- if (midEntry->largeBuf)
+ if (midEntry->large_buf)
cifs_buf_release(midEntry->resp_buf);
else
cifs_small_buf_release(midEntry->resp_buf);
@@ -97,8 +97,8 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
something is wrong, unless it is quite a slow link or server */
if ((now - midEntry->when_alloc) > HZ) {
if ((cifsFYI & CIFS_TIMER) &&
- (midEntry->command != SMB_COM_LOCKING_ANDX)) {
- printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
+ (midEntry->command != cpu_to_le16(SMB_COM_LOCKING_ANDX))) {
+ printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
midEntry->command, midEntry->mid);
printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
now - midEntry->when_alloc,
@@ -126,11 +126,11 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
int rc = 0;
int i = 0;
struct msghdr smb_msg;
- struct smb_hdr *smb_buffer = iov[0].iov_base;
+ __be32 *buf_len = (__be32 *)(iov[0].iov_base);
unsigned int len = iov[0].iov_len;
unsigned int total_len;
int first_vec = 0;
- unsigned int smb_buf_length = be32_to_cpu(smb_buffer->smb_buf_length);
+ unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
struct socket *ssocket = server->ssocket;
if (ssocket == NULL)
@@ -150,7 +150,7 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
total_len += iov[i].iov_len;
cFYI(1, "Sending smb: total_len %d", total_len);
- dump_smb(smb_buffer, len);
+ dump_smb(iov[0].iov_base, len);
i = 0;
while (total_len) {
@@ -158,24 +158,24 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
n_vec - first_vec, total_len);
if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
i++;
- /* if blocking send we try 3 times, since each can block
- for 5 seconds. For nonblocking we have to try more
- but wait increasing amounts of time allowing time for
- socket to clear. The overall time we wait in either
- case to send on the socket is about 15 seconds.
- Similarly we wait for 15 seconds for
- a response from the server in SendReceive[2]
- for the server to send a response back for
- most types of requests (except SMB Write
- past end of file which can be slow, and
- blocking lock operations). NFS waits slightly longer
- than CIFS, but this can make it take longer for
- nonresponsive servers to be detected and 15 seconds
- is more than enough time for modern networks to
- send a packet. In most cases if we fail to send
- after the retries we will kill the socket and
- reconnect which may clear the network problem.
- */
+ /*
+ * If blocking send we try 3 times, since each can block
+ * for 5 seconds. For nonblocking we have to try more
+ * but wait increasing amounts of time allowing time for
+ * socket to clear. The overall time we wait in either
+ * case to send on the socket is about 15 seconds.
+ * Similarly we wait for 15 seconds for a response from
+ * the server in SendReceive[2] for the server to send
+ * a response back for most types of requests (except
+ * SMB Write past end of file which can be slow, and
+ * blocking lock operations). NFS waits slightly longer
+ * than CIFS, but this can make it take longer for
+ * nonresponsive servers to be detected and 15 seconds
+ * is more than enough time for modern networks to
+ * send a packet. In most cases if we fail to send
+ * after the retries we will kill the socket and
+ * reconnect which may clear the network problem.
+ */
if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
cERROR(1, "sends on sock %p stuck for 15 seconds",
ssocket);
@@ -235,9 +235,8 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
else
rc = 0;
- /* Don't want to modify the buffer as a
- side effect of this call. */
- smb_buffer->smb_buf_length = cpu_to_be32(smb_buf_length);
+ /* Don't want to modify the buffer as a side effect of this call. */
+ *buf_len = cpu_to_be32(smb_buf_length);
return rc;
}
@@ -342,13 +341,40 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
int error;
error = wait_event_freezekillable(server->response_q,
- midQ->midState != MID_REQUEST_SUBMITTED);
+ midQ->mid_state != MID_REQUEST_SUBMITTED);
if (error < 0)
return -ERESTARTSYS;
return 0;
}
+static int
+cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
+ unsigned int nvec, struct mid_q_entry **ret_mid)
+{
+ int rc;
+ struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
+ struct mid_q_entry *mid;
+
+ /* enable signing if server requires it */
+ if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+ mid = AllocMidQEntry(hdr, server);
+ if (mid == NULL)
+ return -ENOMEM;
+
+ /* put it on the pending_mid_q */
+ spin_lock(&GlobalMid_Lock);
+ list_add_tail(&mid->qhead, &server->pending_mid_q);
+ spin_unlock(&GlobalMid_Lock);
+
+ rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
+ if (rc)
+ delete_mid(mid);
+ *ret_mid = mid;
+ return rc;
+}
/*
* Send a SMB request and set the callback function in the mid to handle
@@ -361,40 +387,24 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
{
int rc;
struct mid_q_entry *mid;
- struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
if (rc)
return rc;
- /* enable signing if server requires it */
- if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
- hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
-
mutex_lock(&server->srv_mutex);
- mid = AllocMidQEntry(hdr, server);
- if (mid == NULL) {
+ rc = cifs_setup_async_request(server, iov, nvec, &mid);
+ if (rc) {
mutex_unlock(&server->srv_mutex);
cifs_add_credits(server, 1);
wake_up(&server->request_q);
- return -ENOMEM;
- }
-
- /* put it on the pending_mid_q */
- spin_lock(&GlobalMid_Lock);
- list_add_tail(&mid->qhead, &server->pending_mid_q);
- spin_unlock(&GlobalMid_Lock);
-
- rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
- if (rc) {
- mutex_unlock(&server->srv_mutex);
- goto out_err;
+ return rc;
}
mid->receive = receive;
mid->callback = callback;
mid->callback_data = cbdata;
- mid->midState = MID_REQUEST_SUBMITTED;
+ mid->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(server);
rc = smb_sendv(server, iov, nvec);
@@ -424,14 +434,14 @@ out_err:
*/
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
- struct smb_hdr *in_buf, int flags)
+ char *in_buf, int flags)
{
int rc;
struct kvec iov[1];
int resp_buf_type;
- iov[0].iov_base = (char *)in_buf;
- iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4;
+ iov[0].iov_base = in_buf;
+ iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
flags |= CIFS_NO_RESP;
rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
@@ -444,11 +454,11 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
int rc = 0;
- cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
- mid->mid, mid->midState);
+ cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
+ le16_to_cpu(mid->command), mid->mid, mid->mid_state);
spin_lock(&GlobalMid_Lock);
- switch (mid->midState) {
+ switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
spin_unlock(&GlobalMid_Lock);
return rc;
@@ -463,8 +473,8 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
break;
default:
list_del_init(&mid->qhead);
- cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
- mid->mid, mid->midState);
+ cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
+ mid->mid, mid->mid_state);
rc = -EIO;
}
spin_unlock(&GlobalMid_Lock);
@@ -514,7 +524,7 @@ int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
bool log_error)
{
- unsigned int len = be32_to_cpu(mid->resp_buf->smb_buf_length) + 4;
+ unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
dump_smb(mid->resp_buf, min_t(u32, 92, len));
@@ -534,6 +544,24 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
return map_smb_to_linux_error(mid->resp_buf, log_error);
}
+static int
+cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
+ unsigned int nvec, struct mid_q_entry **ret_mid)
+{
+ int rc;
+ struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
+ struct mid_q_entry *mid;
+
+ rc = allocate_mid(ses, hdr, &mid);
+ if (rc)
+ return rc;
+ rc = cifs_sign_smb2(iov, nvec, ses->server, &mid->sequence_number);
+ if (rc)
+ delete_mid(mid);
+ *ret_mid = mid;
+ return rc;
+}
+
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
@@ -542,55 +570,53 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
int rc = 0;
int long_op;
struct mid_q_entry *midQ;
- struct smb_hdr *in_buf = iov[0].iov_base;
+ char *buf = iov[0].iov_base;
long_op = flags & CIFS_TIMEOUT_MASK;
*pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
if ((ses == NULL) || (ses->server == NULL)) {
- cifs_small_buf_release(in_buf);
+ cifs_small_buf_release(buf);
cERROR(1, "Null session");
return -EIO;
}
if (ses->server->tcpStatus == CifsExiting) {
- cifs_small_buf_release(in_buf);
+ cifs_small_buf_release(buf);
return -ENOENT;
}
- /* Ensure that we do not send more than 50 overlapping requests
- to the same server. We may make this configurable later or
- use ses->maxReq */
+ /*
+ * Ensure that we do not send more than 50 overlapping requests
+ * to the same server. We may make this configurable later or
+ * use ses->maxReq.
+ */
rc = wait_for_free_request(ses->server, long_op);
if (rc) {
- cifs_small_buf_release(in_buf);
+ cifs_small_buf_release(buf);
return rc;
}
- /* make sure that we sign in the same order that we send on this socket
- and avoid races inside tcp sendmsg code that could cause corruption
- of smb data */
+ /*
+ * Make sure that we sign in the same order that we send on this socket
+ * and avoid races inside tcp sendmsg code that could cause corruption
+ * of smb data.
+ */
mutex_lock(&ses->server->srv_mutex);
- rc = allocate_mid(ses, in_buf, &midQ);
+ rc = cifs_setup_request(ses, iov, n_vec, &midQ);
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
- cifs_small_buf_release(in_buf);
+ cifs_small_buf_release(buf);
/* Update # of requests on wire to server */
cifs_add_credits(ses->server, 1);
return rc;
}
- rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
- if (rc) {
- mutex_unlock(&ses->server->srv_mutex);
- cifs_small_buf_release(in_buf);
- goto out;
- }
- midQ->midState = MID_REQUEST_SUBMITTED;
+ midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(ses->server);
rc = smb_sendv(ses->server, iov, n_vec);
cifs_in_send_dec(ses->server);
@@ -599,30 +625,30 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
mutex_unlock(&ses->server->srv_mutex);
if (rc < 0) {
- cifs_small_buf_release(in_buf);
+ cifs_small_buf_release(buf);
goto out;
}
if (long_op == CIFS_ASYNC_OP) {
- cifs_small_buf_release(in_buf);
+ cifs_small_buf_release(buf);
goto out;
}
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
- send_nt_cancel(ses->server, in_buf, midQ);
+ send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ);
spin_lock(&GlobalMid_Lock);
- if (midQ->midState == MID_REQUEST_SUBMITTED) {
+ if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
- cifs_small_buf_release(in_buf);
+ cifs_small_buf_release(buf);
cifs_add_credits(ses->server, 1);
return rc;
}
spin_unlock(&GlobalMid_Lock);
}
- cifs_small_buf_release(in_buf);
+ cifs_small_buf_release(buf);
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
@@ -630,15 +656,16 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
return rc;
}
- if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
+ if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
rc = -EIO;
cFYI(1, "Bad MID state?");
goto out;
}
- iov[0].iov_base = (char *)midQ->resp_buf;
- iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
- if (midQ->largeBuf)
+ buf = (char *)midQ->resp_buf;
+ iov[0].iov_base = buf;
+ iov[0].iov_len = get_rfc1002_length(buf) + 4;
+ if (midQ->large_buf)
*pRespBufType = CIFS_LARGE_BUFFER;
else
*pRespBufType = CIFS_SMALL_BUFFER;
@@ -710,7 +737,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
goto out;
}
- midQ->midState = MID_REQUEST_SUBMITTED;
+ midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(ses->server);
rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
@@ -728,7 +755,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc != 0) {
send_nt_cancel(ses->server, in_buf, midQ);
spin_lock(&GlobalMid_Lock);
- if (midQ->midState == MID_REQUEST_SUBMITTED) {
+ if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
@@ -745,13 +772,13 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
}
if (!midQ->resp_buf || !out_buf ||
- midQ->midState != MID_RESPONSE_RECEIVED) {
+ midQ->mid_state != MID_RESPONSE_RECEIVED) {
rc = -EIO;
cERROR(1, "Bad MID state?");
goto out;
}
- *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
+ *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
rc = cifs_check_receive(midQ, ses->server, 0);
out:
@@ -844,7 +871,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
- midQ->midState = MID_REQUEST_SUBMITTED;
+ midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(ses->server);
rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
cifs_in_send_dec(ses->server);
@@ -858,13 +885,13 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
/* Wait for a reply - allow signals to interrupt. */
rc = wait_event_interruptible(ses->server->response_q,
- (!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
+ (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
((ses->server->tcpStatus != CifsGood) &&
(ses->server->tcpStatus != CifsNew)));
/* Were we interrupted by a signal ? */
if ((rc == -ERESTARTSYS) &&
- (midQ->midState == MID_REQUEST_SUBMITTED) &&
+ (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
((ses->server->tcpStatus == CifsGood) ||
(ses->server->tcpStatus == CifsNew))) {
@@ -894,7 +921,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
if (rc) {
send_nt_cancel(ses->server, in_buf, midQ);
spin_lock(&GlobalMid_Lock);
- if (midQ->midState == MID_REQUEST_SUBMITTED) {
+ if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
@@ -912,13 +939,13 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
/* rcvd frame is ok */
- if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
+ if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
rc = -EIO;
cERROR(1, "Bad MID state?");
goto out;
}
- *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
+ *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
rc = cifs_check_receive(midQ, ses->server, 0);
out:
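
Several of the hunks above stop dereferencing struct smb_hdr just to read smb_buf_length and instead call get_rfc1002_length() on the raw buffer; the value is the big-endian 32-bit length that prefixes every SMB frame, and the on-the-wire size is that length plus the 4 prefix bytes. A user-space stand-in for the conversion, modelled as the same be32-to-host conversion it replaces; rfc1002_length here is a local name, not the kernel helper:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* The SMB frame starts with a 4-byte big-endian RFC 1002 length field. */
    static unsigned int rfc1002_length(const void *buf)
    {
        uint32_t be;

        memcpy(&be, buf, sizeof(be));
        return ntohl(be);
    }

    int main(void)
    {
        unsigned char frame[8] = { 0x00, 0x00, 0x00, 0x2c };   /* 44-byte body */

        printf("length %u, on-the-wire size %u\n",
               rfc1002_length(frame), rfc1002_length(frame) + 4);
        return 0;
    }
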
diff --git a/fs/compat.c b/fs/compat.c
index 14483a715bbb..f2944ace7a7b 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1170,10 +1170,9 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec,
}
asmlinkage ssize_t
-compat_sys_preadv(unsigned long fd, const struct compat_iovec __user *vec,
- unsigned long vlen, u32 pos_low, u32 pos_high)
+compat_sys_preadv64(unsigned long fd, const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos)
{
- loff_t pos = ((loff_t)pos_high << 32) | pos_low;
struct file *file;
int fput_needed;
ssize_t ret;
@@ -1190,6 +1189,14 @@ compat_sys_preadv(unsigned long fd, const struct compat_iovec __user *vec,
return ret;
}
+asmlinkage ssize_t
+compat_sys_preadv(unsigned long fd, const struct compat_iovec __user *vec,
+ unsigned long vlen, u32 pos_low, u32 pos_high)
+{
+ loff_t pos = ((loff_t)pos_high << 32) | pos_low;
+ return compat_sys_preadv64(fd, vec, vlen, pos);
+}
+
static size_t compat_writev(struct file *file,
const struct compat_iovec __user *vec,
unsigned long vlen, loff_t *pos)
@@ -1229,10 +1236,9 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec,
}
asmlinkage ssize_t
-compat_sys_pwritev(unsigned long fd, const struct compat_iovec __user *vec,
- unsigned long vlen, u32 pos_low, u32 pos_high)
+compat_sys_pwritev64(unsigned long fd, const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos)
{
- loff_t pos = ((loff_t)pos_high << 32) | pos_low;
struct file *file;
int fput_needed;
ssize_t ret;
@@ -1249,6 +1255,14 @@ compat_sys_pwritev(unsigned long fd, const struct compat_iovec __user *vec,
return ret;
}
+asmlinkage ssize_t
+compat_sys_pwritev(unsigned long fd, const struct compat_iovec __user *vec,
+ unsigned long vlen, u32 pos_low, u32 pos_high)
+{
+ loff_t pos = ((loff_t)pos_high << 32) | pos_low;
+ return compat_sys_pwritev64(fd, vec, vlen, pos);
+}
+
asmlinkage long
compat_sys_vmsplice(int fd, const struct compat_iovec __user *iov32,
unsigned int nr_segs, unsigned int flags)
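
The compat.c change splits out compat_sys_preadv64()/compat_sys_pwritev64(), which take a ready-made 64-bit offset, while the old entry points shrink to wrappers that stitch the offset back together from the pos_low/pos_high pair. The packing itself is just a shift and an OR:

    #include <stdio.h>
    #include <stdint.h>

    /* The compat wrapper rebuilds the 64-bit offset from two 32-bit halves
     * and then calls the new *64 helper with it. */
    static int64_t combine_pos(uint32_t pos_low, uint32_t pos_high)
    {
        return ((int64_t)pos_high << 32) | pos_low;
    }

    int main(void)
    {
        printf("0x%llx\n",
               (unsigned long long)combine_pos(0x89abcdefU, 0x01234567U));
        return 0;
    }
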
diff --git a/fs/exec.c b/fs/exec.c
index c8b63d14da85..9a1d9f0a60ab 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1028,10 +1028,10 @@ static void flush_old_files(struct files_struct * files)
fdt = files_fdtable(files);
if (i >= fdt->max_fds)
break;
- set = fdt->close_on_exec->fds_bits[j];
+ set = fdt->close_on_exec[j];
if (!set)
continue;
- fdt->close_on_exec->fds_bits[j] = 0;
+ fdt->close_on_exec[j] = 0;
spin_unlock(&files->file_lock);
for ( ; set ; i++,set >>= 1) {
if (set & 1) {
@@ -2067,8 +2067,8 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
fd_install(0, rp);
spin_lock(&cf->file_lock);
fdt = files_fdtable(cf);
- FD_SET(0, fdt->open_fds);
- FD_CLR(0, fdt->close_on_exec);
+ __set_open_fd(0, fdt);
+ __clear_close_on_exec(0, fdt);
spin_unlock(&cf->file_lock);
/* and disallow core files too */
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ad56866d729a..b86786202643 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -32,24 +32,8 @@ static unsigned char ext4_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
-static int ext4_readdir(struct file *, void *, filldir_t);
static int ext4_dx_readdir(struct file *filp,
void *dirent, filldir_t filldir);
-static int ext4_release_dir(struct inode *inode,
- struct file *filp);
-
-const struct file_operations ext4_dir_operations = {
- .llseek = ext4_llseek,
- .read = generic_read_dir,
- .readdir = ext4_readdir, /* we take BKL. needed?*/
- .unlocked_ioctl = ext4_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ext4_compat_ioctl,
-#endif
- .fsync = ext4_sync_file,
- .release = ext4_release_dir,
-};
-
static unsigned char get_dtype(struct super_block *sb, int filetype)
{
@@ -60,6 +44,26 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
return (ext4_filetype_table[filetype]);
}
+/**
+ * Check if the given dir-inode refers to an htree-indexed directory
+ * (or a directory which chould potentially get coverted to use htree
+ * indexing).
+ *
+ * Return 1 if it is a dx dir, 0 if not
+ */
+static int is_dx_dir(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+
+ if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
+ EXT4_FEATURE_COMPAT_DIR_INDEX) &&
+ ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
+ ((inode->i_size >> sb->s_blocksize_bits) == 1)))
+ return 1;
+
+ return 0;
+}
+
/*
* Return 0 if the directory entry is OK, and 1 if there is a problem
*
@@ -115,18 +119,13 @@ static int ext4_readdir(struct file *filp,
unsigned int offset;
int i, stored;
struct ext4_dir_entry_2 *de;
- struct super_block *sb;
int err;
struct inode *inode = filp->f_path.dentry->d_inode;
+ struct super_block *sb = inode->i_sb;
int ret = 0;
int dir_has_error = 0;
- sb = inode->i_sb;
-
- if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
- EXT4_FEATURE_COMPAT_DIR_INDEX) &&
- ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
- ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
+ if (is_dx_dir(inode)) {
err = ext4_dx_readdir(filp, dirent, filldir);
if (err != ERR_BAD_DX_DIR) {
ret = err;
@@ -254,22 +253,134 @@ out:
return ret;
}
+static inline int is_32bit_api(void)
+{
+#ifdef CONFIG_COMPAT
+ return is_compat_task();
+#else
+ return (BITS_PER_LONG == 32);
+#endif
+}
+
/*
* These functions convert from the major/minor hash to an f_pos
- * value.
+ * value for dx directories
+ *
+ * Upper layer (for example NFS) should specify FMODE_32BITHASH or
+ * FMODE_64BITHASH explicitly. On the other hand, we allow ext4 to be mounted
+ * directly on both 32-bit and 64-bit nodes, in which case neither
+ * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
+ */
+static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return major >> 1;
+ else
+ return ((__u64)(major >> 1) << 32) | (__u64)minor;
+}
+
+static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return (pos << 1) & 0xffffffff;
+ else
+ return ((pos >> 32) << 1) & 0xffffffff;
+}
+
+static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return 0;
+ else
+ return pos & 0xffffffff;
+}
+
+/*
+ * Return 32- or 64-bit end-of-file for dx directories
+ */
+static inline loff_t ext4_get_htree_eof(struct file *filp)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return EXT4_HTREE_EOF_32BIT;
+ else
+ return EXT4_HTREE_EOF_64BIT;
+}
+
+
+/*
+ * ext4_dir_llseek() based on generic_file_llseek() to handle both
+ * non-htree and htree directories, where the "offset" is in terms
+ * of the filename hash value instead of the byte offset.
*
- * Currently we only use major hash numer. This is unfortunate, but
- * on 32-bit machines, the same VFS interface is used for lseek and
- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
- * lseek/telldir/seekdir will blow out spectacularly, and from within
- * the ext2 low-level routine, we don't know if we're being called by
- * a 64-bit version of the system call or the 32-bit version of the
- * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
- * cookie. Sigh.
+ * NOTE: offsets obtained *before* ext4_set_inode_flag(dir, EXT4_INODE_INDEX)
+ * will be invalid once the directory has been converted into a dx directory
*/
-#define hash2pos(major, minor) (major >> 1)
-#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
-#define pos2min_hash(pos) (0)
+loff_t ext4_dir_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct inode *inode = file->f_mapping->host;
+ loff_t ret = -EINVAL;
+ int dx_dir = is_dx_dir(inode);
+
+ mutex_lock(&inode->i_mutex);
+
+ /* NOTE: relative offsets with dx directories might not work
+ * as expected, as it is difficult to figure out the
+ * correct offset between dx hashes */
+
+ switch (origin) {
+ case SEEK_END:
+ if (unlikely(offset > 0))
+ goto out_err; /* not supported for directories */
+
+ /* so only negative offsets are left, does that have a
+ * meaning for directories at all? */
+ if (dx_dir)
+ offset += ext4_get_htree_eof(file);
+ else
+ offset += inode->i_size;
+ break;
+ case SEEK_CUR:
+ /*
+ * Here we special-case the lseek(fd, 0, SEEK_CUR)
+ * position-querying operation. Avoid rewriting the "same"
+ * f_pos value back to the file because a concurrent read(),
+ * write() or lseek() might have altered it
+ */
+ if (offset == 0) {
+ offset = file->f_pos;
+ goto out_ok;
+ }
+
+ offset += file->f_pos;
+ break;
+ }
+
+ if (unlikely(offset < 0))
+ goto out_err;
+
+ if (!dx_dir) {
+ if (offset > inode->i_sb->s_maxbytes)
+ goto out_err;
+ } else if (offset > ext4_get_htree_eof(file))
+ goto out_err;
+
+ /* Special lock needed here? */
+ if (offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ }
+
+out_ok:
+ ret = offset;
+out_err:
+ mutex_unlock(&inode->i_mutex);
+
+ return ret;
+}
/*
* This structure holds the nodes of the red-black tree used to store
@@ -330,15 +441,16 @@ static void free_rb_tree_fname(struct rb_root *root)
}
-static struct dir_private_info *ext4_htree_create_dir_info(loff_t pos)
+static struct dir_private_info *ext4_htree_create_dir_info(struct file *filp,
+ loff_t pos)
{
struct dir_private_info *p;
p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
if (!p)
return NULL;
- p->curr_hash = pos2maj_hash(pos);
- p->curr_minor_hash = pos2min_hash(pos);
+ p->curr_hash = pos2maj_hash(filp, pos);
+ p->curr_minor_hash = pos2min_hash(filp, pos);
return p;
}
@@ -430,7 +542,7 @@ static int call_filldir(struct file *filp, void *dirent,
inode->i_ino, current->comm);
return 0;
}
- curr_pos = hash2pos(fname->hash, fname->minor_hash);
+ curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
while (fname) {
error = filldir(dirent, fname->name,
fname->name_len, curr_pos,
@@ -455,13 +567,13 @@ static int ext4_dx_readdir(struct file *filp,
int ret;
if (!info) {
- info = ext4_htree_create_dir_info(filp->f_pos);
+ info = ext4_htree_create_dir_info(filp, filp->f_pos);
if (!info)
return -ENOMEM;
filp->private_data = info;
}
- if (filp->f_pos == EXT4_HTREE_EOF)
+ if (filp->f_pos == ext4_get_htree_eof(filp))
return 0; /* EOF */
/* Some one has messed with f_pos; reset the world */
@@ -469,8 +581,8 @@ static int ext4_dx_readdir(struct file *filp,
free_rb_tree_fname(&info->root);
info->curr_node = NULL;
info->extra_fname = NULL;
- info->curr_hash = pos2maj_hash(filp->f_pos);
- info->curr_minor_hash = pos2min_hash(filp->f_pos);
+ info->curr_hash = pos2maj_hash(filp, filp->f_pos);
+ info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
}
/*
@@ -502,7 +614,7 @@ static int ext4_dx_readdir(struct file *filp,
if (ret < 0)
return ret;
if (ret == 0) {
- filp->f_pos = EXT4_HTREE_EOF;
+ filp->f_pos = ext4_get_htree_eof(filp);
break;
}
info->curr_node = rb_first(&info->root);
@@ -522,7 +634,7 @@ static int ext4_dx_readdir(struct file *filp,
info->curr_minor_hash = fname->minor_hash;
} else {
if (info->next_hash == ~0) {
- filp->f_pos = EXT4_HTREE_EOF;
+ filp->f_pos = ext4_get_htree_eof(filp);
break;
}
info->curr_hash = info->next_hash;
@@ -541,3 +653,15 @@ static int ext4_release_dir(struct inode *inode, struct file *filp)
return 0;
}
+
+const struct file_operations ext4_dir_operations = {
+ .llseek = ext4_dir_llseek,
+ .read = generic_read_dir,
+ .readdir = ext4_readdir,
+ .unlocked_ioctl = ext4_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ext4_compat_ioctl,
+#endif
+ .fsync = ext4_sync_file,
+ .release = ext4_release_dir,
+};
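
The new hash2pos()/pos2maj_hash()/pos2min_hash() helpers pick an encoding per struct file: 32-bit consumers (FMODE_32BITHASH, or a 32-bit task with neither flag) keep the old major-hash-only cookie, while 64-bit consumers get the major hash minus its low bit in the upper 32 bits and the minor hash in the lower 32. A user-space sketch of the 64-bit packing and its round trip; hash2pos64 and friends are local names for this sketch only:

    #include <stdio.h>
    #include <stdint.h>
    #include <assert.h>

    /* 64-bit variant: major hash (minus its low bit) in the top half,
     * minor hash in the bottom half.  The 32-bit variant keeps only
     * major >> 1 so legacy 32-bit telldir cookies still fit. */
    static uint64_t hash2pos64(uint32_t major, uint32_t minor)
    {
        return ((uint64_t)(major >> 1) << 32) | minor;
    }

    static uint32_t pos2maj64(uint64_t pos)
    {
        return (uint32_t)(((pos >> 32) << 1) & 0xffffffff);
    }

    static uint32_t pos2min64(uint64_t pos)
    {
        return (uint32_t)(pos & 0xffffffff);
    }

    int main(void)
    {
        uint32_t major = 0xdeadbee3, minor = 0x12345678;
        uint64_t pos = hash2pos64(major, minor);

        /* round-trips up to the low bit of the major hash */
        assert(pos2maj64(pos) == (major & ~1u));
        assert(pos2min64(pos) == minor);
        printf("pos = 0x%llx\n", (unsigned long long)pos);
        return 0;
    }
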
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ded731ac8a32..ab2594a30f86 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1623,7 +1623,11 @@ struct dx_hash_info
u32 *seed;
};
-#define EXT4_HTREE_EOF 0x7fffffff
+
+/* 32 and 64 bit signed EOF for dx directories */
+#define EXT4_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
+#define EXT4_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
+
/*
* Control parameters used by ext4_htree_next_block
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index ac8f168c8ab4..fa8e4911d354 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -200,8 +200,8 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
return -1;
}
hash = hash & ~1;
- if (hash == (EXT4_HTREE_EOF << 1))
- hash = (EXT4_HTREE_EOF-1) << 1;
+ if (hash == (EXT4_HTREE_EOF_32BIT << 1))
+ hash = (EXT4_HTREE_EOF_32BIT - 1) << 1;
hinfo->hash = hash;
hinfo->minor_hash = minor_hash;
return 0;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 74cd1f7f1f88..dcdeef169a69 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -60,6 +60,7 @@ void ext4_ioend_wait(struct inode *inode)
static void put_io_page(struct ext4_io_page *io_page)
{
if (atomic_dec_and_test(&io_page->p_count)) {
+ end_page_writeback(io_page->p_page);
put_page(io_page->p_page);
kmem_cache_free(io_page_cachep, io_page);
}
@@ -233,9 +234,9 @@ static void ext4_end_bio(struct bio *bio, int error)
} while (bh != head);
}
- if (atomic_read(&io_end->pages[i]->p_count) == 1)
- end_page_writeback(io_end->pages[i]->p_page);
+ put_io_page(io_end->pages[i]);
}
+ io_end->num_io_pages = 0;
inode = io_end->inode;
if (error) {
@@ -427,8 +428,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
* PageWriteback bit from the page to prevent the system from
* wedging later on.
*/
- if (atomic_read(&io_page->p_count) == 1)
- end_page_writeback(page);
put_io_page(io_page);
return ret;
}
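
The page-io.c fix moves end_page_writeback() into put_io_page(), so the writeback bit is cleared exactly once, when the last reference on the io_page goes away, instead of callers peeking at p_count. A toy model of that refcount pattern; struct io_page_model is invented for the sketch:

    #include <stdio.h>
    #include <stdlib.h>

    struct io_page_model {
        int p_count;
        int writeback;
    };

    /* Model of put_io_page(): finish writeback only on the final put. */
    static void put_io_page_model(struct io_page_model *io_page)
    {
        if (--io_page->p_count == 0) {
            io_page->writeback = 0;     /* stands in for end_page_writeback() */
            printf("writeback ended\n");
            free(io_page);
        }
    }

    int main(void)
    {
        struct io_page_model *p = malloc(sizeof(*p));

        p->p_count = 2;
        p->writeback = 1;
        put_io_page_model(p);   /* bio completion drops one reference */
        put_io_page_model(p);   /* submit path drops the last reference */
        return 0;
    }
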
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 22764c7c8382..75e7c1f3a080 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -32,20 +32,20 @@ void set_close_on_exec(unsigned int fd, int flag)
spin_lock(&files->file_lock);
fdt = files_fdtable(files);
if (flag)
- FD_SET(fd, fdt->close_on_exec);
+ __set_close_on_exec(fd, fdt);
else
- FD_CLR(fd, fdt->close_on_exec);
+ __clear_close_on_exec(fd, fdt);
spin_unlock(&files->file_lock);
}
-static int get_close_on_exec(unsigned int fd)
+static bool get_close_on_exec(unsigned int fd)
{
struct files_struct *files = current->files;
struct fdtable *fdt;
- int res;
+ bool res;
rcu_read_lock();
fdt = files_fdtable(files);
- res = FD_ISSET(fd, fdt->close_on_exec);
+ res = close_on_exec(fd, fdt);
rcu_read_unlock();
return res;
}
@@ -90,15 +90,15 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
err = -EBUSY;
fdt = files_fdtable(files);
tofree = fdt->fd[newfd];
- if (!tofree && FD_ISSET(newfd, fdt->open_fds))
+ if (!tofree && fd_is_open(newfd, fdt))
goto out_unlock;
get_file(file);
rcu_assign_pointer(fdt->fd[newfd], file);
- FD_SET(newfd, fdt->open_fds);
+ __set_open_fd(newfd, fdt);
if (flags & O_CLOEXEC)
- FD_SET(newfd, fdt->close_on_exec);
+ __set_close_on_exec(newfd, fdt);
else
- FD_CLR(newfd, fdt->close_on_exec);
+ __clear_close_on_exec(newfd, fdt);
spin_unlock(&files->file_lock);
if (tofree)
diff --git a/fs/file.c b/fs/file.c
index 3c426de7203a..ba3f6053025c 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -40,7 +40,7 @@ int sysctl_nr_open_max = 1024 * 1024; /* raised later */
*/
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);
-static void *alloc_fdmem(unsigned int size)
+static void *alloc_fdmem(size_t size)
{
/*
* Very large allocations can stress page reclaim, so fall back to
@@ -142,7 +142,7 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
static struct fdtable * alloc_fdtable(unsigned int nr)
{
struct fdtable *fdt;
- char *data;
+ void *data;
/*
* Figure out how many fds we actually want to support in this fdtable.
@@ -172,14 +172,15 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
data = alloc_fdmem(nr * sizeof(struct file *));
if (!data)
goto out_fdt;
- fdt->fd = (struct file **)data;
- data = alloc_fdmem(max_t(unsigned int,
+ fdt->fd = data;
+
+ data = alloc_fdmem(max_t(size_t,
2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
if (!data)
goto out_arr;
- fdt->open_fds = (fd_set *)data;
+ fdt->open_fds = data;
data += nr / BITS_PER_BYTE;
- fdt->close_on_exec = (fd_set *)data;
+ fdt->close_on_exec = data;
fdt->next = NULL;
return fdt;
@@ -275,11 +276,11 @@ static int count_open_files(struct fdtable *fdt)
int i;
/* Find the last open fd */
- for (i = size/(8*sizeof(long)); i > 0; ) {
- if (fdt->open_fds->fds_bits[--i])
+ for (i = size / BITS_PER_LONG; i > 0; ) {
+ if (fdt->open_fds[--i])
break;
}
- i = (i+1) * 8 * sizeof(long);
+ i = (i + 1) * BITS_PER_LONG;
return i;
}
@@ -306,8 +307,8 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
newf->next_fd = 0;
new_fdt = &newf->fdtab;
new_fdt->max_fds = NR_OPEN_DEFAULT;
- new_fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
- new_fdt->open_fds = (fd_set *)&newf->open_fds_init;
+ new_fdt->close_on_exec = newf->close_on_exec_init;
+ new_fdt->open_fds = newf->open_fds_init;
new_fdt->fd = &newf->fd_array[0];
new_fdt->next = NULL;
@@ -350,10 +351,8 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
old_fds = old_fdt->fd;
new_fds = new_fdt->fd;
- memcpy(new_fdt->open_fds->fds_bits,
- old_fdt->open_fds->fds_bits, open_files/8);
- memcpy(new_fdt->close_on_exec->fds_bits,
- old_fdt->close_on_exec->fds_bits, open_files/8);
+ memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
+ memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);
for (i = open_files; i != 0; i--) {
struct file *f = *old_fds++;
@@ -366,7 +365,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
* is partway through open(). So make sure that this
* fd is available to the new process.
*/
- FD_CLR(open_files - i, new_fdt->open_fds);
+ __clear_open_fd(open_files - i, new_fdt);
}
rcu_assign_pointer(*new_fds++, f);
}
@@ -379,11 +378,11 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
memset(new_fds, 0, size);
if (new_fdt->max_fds > open_files) {
- int left = (new_fdt->max_fds-open_files)/8;
- int start = open_files / (8 * sizeof(unsigned long));
+ int left = (new_fdt->max_fds - open_files) / 8;
+ int start = open_files / BITS_PER_LONG;
- memset(&new_fdt->open_fds->fds_bits[start], 0, left);
- memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
+ memset(&new_fdt->open_fds[start], 0, left);
+ memset(&new_fdt->close_on_exec[start], 0, left);
}
rcu_assign_pointer(newf->fdt, new_fdt);
@@ -419,8 +418,8 @@ struct files_struct init_files = {
.fdtab = {
.max_fds = NR_OPEN_DEFAULT,
.fd = &init_files.fd_array[0],
- .close_on_exec = (fd_set *)&init_files.close_on_exec_init,
- .open_fds = (fd_set *)&init_files.open_fds_init,
+ .close_on_exec = init_files.close_on_exec_init,
+ .open_fds = init_files.open_fds_init,
},
.file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
};
@@ -443,8 +442,7 @@ repeat:
fd = files->next_fd;
if (fd < fdt->max_fds)
- fd = find_next_zero_bit(fdt->open_fds->fds_bits,
- fdt->max_fds, fd);
+ fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);
error = expand_files(files, fd);
if (error < 0)
@@ -460,11 +458,11 @@ repeat:
if (start <= files->next_fd)
files->next_fd = fd + 1;
- FD_SET(fd, fdt->open_fds);
+ __set_open_fd(fd, fdt);
if (flags & O_CLOEXEC)
- FD_SET(fd, fdt->close_on_exec);
+ __set_close_on_exec(fd, fdt);
else
- FD_CLR(fd, fdt->close_on_exec);
+ __clear_close_on_exec(fd, fdt);
error = fd;
#if 1
/* Sanity check */
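
The fdtable conversion in exec.c, fcntl.c and file.c replaces the fd_set pointers with plain unsigned long bitmaps, and FD_SET/FD_CLR/FD_ISSET with helpers such as __set_open_fd() and close_on_exec() that do the BITS_PER_LONG indexing themselves. A user-space model of the open_fds bitmap and two such helpers, simplified to a fixed-size static array:

    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
    #define MAX_FDS 256

    /* Plain unsigned long bitmap in place of the old fd_set. */
    static unsigned long open_fds[MAX_FDS / BITS_PER_LONG];

    static void set_open_fd(unsigned int fd)
    {
        open_fds[fd / BITS_PER_LONG] |= 1UL << (fd % BITS_PER_LONG);
    }

    static int fd_is_open(unsigned int fd)
    {
        return (open_fds[fd / BITS_PER_LONG] >> (fd % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
        set_open_fd(0);
        set_open_fd(65);
        printf("fd 65 open: %d, fd 66 open: %d\n",
               fd_is_open(65), fd_is_open(66));
        return 0;
    }
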
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 926d02068a14..922f146e4235 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 404111b016c9..2b60ce1996aa 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/jffs2.h>
#include <linux/mtd/mtd.h>
@@ -42,12 +44,13 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index);
if (IS_ERR(tsk)) {
- printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %ld\n", -PTR_ERR(tsk));
+ pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n",
+ -PTR_ERR(tsk));
complete(&c->gc_thread_exit);
ret = PTR_ERR(tsk);
} else {
/* Wait for it... */
- D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", tsk->pid));
+ jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid);
wait_for_completion(&c->gc_thread_start);
ret = tsk->pid;
}
@@ -60,7 +63,7 @@ void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c)
int wait = 0;
spin_lock(&c->erase_completion_lock);
if (c->gc_task) {
- D1(printk(KERN_DEBUG "jffs2: Killing GC task %d\n", c->gc_task->pid));
+ jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid);
send_sig(SIGKILL, c->gc_task, 1);
wait = 1;
}
@@ -90,7 +93,7 @@ static int jffs2_garbage_collect_thread(void *_c)
if (!jffs2_thread_should_wake(c)) {
set_current_state (TASK_INTERRUPTIBLE);
spin_unlock(&c->erase_completion_lock);
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
+ jffs2_dbg(1, "%s(): sleeping...\n", __func__);
schedule();
} else
spin_unlock(&c->erase_completion_lock);
@@ -109,7 +112,7 @@ static int jffs2_garbage_collect_thread(void *_c)
schedule_timeout_interruptible(msecs_to_jiffies(50));
if (kthread_should_stop()) {
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): kthread_stop() called.\n"));
+ jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__);
goto die;
}
@@ -126,28 +129,32 @@ static int jffs2_garbage_collect_thread(void *_c)
switch(signr) {
case SIGSTOP:
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
+ jffs2_dbg(1, "%s(): SIGSTOP received\n",
+ __func__);
set_current_state(TASK_STOPPED);
schedule();
break;
case SIGKILL:
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
+ jffs2_dbg(1, "%s(): SIGKILL received\n",
+ __func__);
goto die;
case SIGHUP:
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
+ jffs2_dbg(1, "%s(): SIGHUP received\n",
+ __func__);
break;
default:
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
+ jffs2_dbg(1, "%s(): signal %ld received\n",
+ __func__, signr);
}
}
/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
disallow_signal(SIGHUP);
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
+ jffs2_dbg(1, "%s(): pass\n", __func__);
if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
- printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
+ pr_notice("No space for garbage collection. Aborting GC thread\n");
goto die;
}
}
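
[Editor's note] The background.c hunks only rewrite the messages; the thread start-up itself keeps the usual kthread_run()/IS_ERR() pattern. A stripped-down sketch of that pattern follows, with hypothetical names (my_gc_thread, start_gc_thread) standing in for the real jffs2 symbols and a trivial loop body:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Hypothetical body: the real jffs2 GC loop handles signals and GC passes. */
static int my_gc_thread(void *data)
{
	while (!kthread_should_stop())
		msleep(50);
	return 0;
}

static int start_gc_thread(void *ctx, int index)
{
	struct task_struct *tsk;

	tsk = kthread_run(my_gc_thread, ctx, "my_gcd_mtd%d", index);
	if (IS_ERR(tsk)) {
		/* kthread_run() returns ERR_PTR(-errno) on failure */
		pr_warn("fork failed for GC thread: %ld\n", -PTR_ERR(tsk));
		return PTR_ERR(tsk);
	}
	pr_debug("GC thread is pid %d\n", tsk->pid);
	return tsk->pid;
}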
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 3005ec4520ad..a3750f902adc 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -307,8 +309,8 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
trying to GC to make more space. It'll be a fruitless task */
c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
- dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
- c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
+ dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
+ c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n",
c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n",
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index 96ed3c9ec3fc..4849a4c9a0e2 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -12,6 +12,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "compr.h"
static DEFINE_SPINLOCK(jffs2_compressor_list_lock);
@@ -79,7 +81,7 @@ static int jffs2_selected_compress(u8 compr, unsigned char *data_in,
output_buf = kmalloc(*cdatalen, GFP_KERNEL);
if (!output_buf) {
- printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n");
+ pr_warn("No memory for compressor allocation. Compression failed.\n");
return ret;
}
orig_slen = *datalen;
@@ -188,7 +190,8 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
tmp_buf = kmalloc(orig_slen, GFP_KERNEL);
spin_lock(&jffs2_compressor_list_lock);
if (!tmp_buf) {
- printk(KERN_WARNING "JFFS2: No memory for compressor allocation. (%d bytes)\n", orig_slen);
+ pr_warn("No memory for compressor allocation. (%d bytes)\n",
+ orig_slen);
continue;
}
else {
@@ -235,7 +238,7 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
cpage_out, datalen, cdatalen);
break;
default:
- printk(KERN_ERR "JFFS2: unknown compression mode.\n");
+ pr_err("unknown compression mode\n");
}
if (ret == JFFS2_COMPR_NONE) {
@@ -277,7 +280,8 @@ int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
ret = this->decompress(cdata_in, data_out, cdatalen, datalen);
spin_lock(&jffs2_compressor_list_lock);
if (ret) {
- printk(KERN_WARNING "Decompressor \"%s\" returned %d\n", this->name, ret);
+ pr_warn("Decompressor \"%s\" returned %d\n",
+ this->name, ret);
}
else {
this->stat_decompr_blocks++;
@@ -287,7 +291,7 @@ int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
return ret;
}
}
- printk(KERN_WARNING "JFFS2 compression type 0x%02x not available.\n", comprtype);
+ pr_warn("compression type 0x%02x not available\n", comprtype);
spin_unlock(&jffs2_compressor_list_lock);
return -EIO;
}
@@ -299,7 +303,7 @@ int jffs2_register_compressor(struct jffs2_compressor *comp)
struct jffs2_compressor *this;
if (!comp->name) {
- printk(KERN_WARNING "NULL compressor name at registering JFFS2 compressor. Failed.\n");
+ pr_warn("NULL compressor name at registering JFFS2 compressor. Failed.\n");
return -1;
}
comp->compr_buf_size=0;
@@ -309,7 +313,7 @@ int jffs2_register_compressor(struct jffs2_compressor *comp)
comp->stat_compr_new_size=0;
comp->stat_compr_blocks=0;
comp->stat_decompr_blocks=0;
- D1(printk(KERN_DEBUG "Registering JFFS2 compressor \"%s\"\n", comp->name));
+ jffs2_dbg(1, "Registering JFFS2 compressor \"%s\"\n", comp->name);
spin_lock(&jffs2_compressor_list_lock);
@@ -332,15 +336,15 @@ out:
int jffs2_unregister_compressor(struct jffs2_compressor *comp)
{
- D2(struct jffs2_compressor *this;)
+ D2(struct jffs2_compressor *this);
- D1(printk(KERN_DEBUG "Unregistering JFFS2 compressor \"%s\"\n", comp->name));
+ jffs2_dbg(1, "Unregistering JFFS2 compressor \"%s\"\n", comp->name);
spin_lock(&jffs2_compressor_list_lock);
if (comp->usecount) {
spin_unlock(&jffs2_compressor_list_lock);
- printk(KERN_WARNING "JFFS2: Compressor module is in use. Unregister failed.\n");
+ pr_warn("Compressor module is in use. Unregister failed.\n");
return -1;
}
list_del(&comp->list);
@@ -377,17 +381,17 @@ int __init jffs2_compressors_init(void)
/* Setting default compression mode */
#ifdef CONFIG_JFFS2_CMODE_NONE
jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
- D1(printk(KERN_INFO "JFFS2: default compression mode: none\n");)
+ jffs2_dbg(1, "default compression mode: none\n");
#else
#ifdef CONFIG_JFFS2_CMODE_SIZE
jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
- D1(printk(KERN_INFO "JFFS2: default compression mode: size\n");)
+ jffs2_dbg(1, "default compression mode: size\n");
#else
#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO
jffs2_compression_mode = JFFS2_COMPR_MODE_FAVOURLZO;
- D1(printk(KERN_INFO "JFFS2: default compression mode: favourlzo\n");)
+ jffs2_dbg(1, "default compression mode: favourlzo\n");
#else
- D1(printk(KERN_INFO "JFFS2: default compression mode: priority\n");)
+ jffs2_dbg(1, "default compression mode: priority\n");
#endif
#endif
#endif
diff --git a/fs/jffs2/compr_lzo.c b/fs/jffs2/compr_lzo.c
index af186ee674d8..c553bd6506da 100644
--- a/fs/jffs2/compr_lzo.c
+++ b/fs/jffs2/compr_lzo.c
@@ -33,7 +33,6 @@ static int __init alloc_workspace(void)
lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
if (!lzo_mem || !lzo_compress_buf) {
- printk(KERN_WARNING "Failed to allocate lzo deflate workspace\n");
free_workspace();
return -ENOMEM;
}
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
index 9e7cec808c4c..92e0644bf867 100644
--- a/fs/jffs2/compr_rubin.c
+++ b/fs/jffs2/compr_rubin.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/string.h>
#include <linux/types.h>
#include <linux/jffs2.h>
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index 5a001020c542..0b9a1e44e833 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -14,6 +14,8 @@
#error "The userspace support got too messy and was removed. Update your mkfs.jffs2"
#endif
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
@@ -42,18 +44,18 @@ static int __init alloc_workspaces(void)
{
def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
MAX_MEM_LEVEL));
- if (!def_strm.workspace) {
- printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
+ if (!def_strm.workspace)
return -ENOMEM;
- }
- D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL)));
+
+ jffs2_dbg(1, "Allocated %d bytes for deflate workspace\n",
+ zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
if (!inf_strm.workspace) {
- printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize());
vfree(def_strm.workspace);
return -ENOMEM;
}
- D1(printk(KERN_DEBUG "Allocated %d bytes for inflate workspace\n", zlib_inflate_workspacesize()));
+ jffs2_dbg(1, "Allocated %d bytes for inflate workspace\n",
+ zlib_inflate_workspacesize());
return 0;
}
@@ -79,7 +81,7 @@ static int jffs2_zlib_compress(unsigned char *data_in,
mutex_lock(&deflate_mutex);
if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
- printk(KERN_WARNING "deflateInit failed\n");
+ pr_warn("deflateInit failed\n");
mutex_unlock(&deflate_mutex);
return -1;
}
@@ -93,13 +95,14 @@ static int jffs2_zlib_compress(unsigned char *data_in,
while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) {
def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE);
def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out);
- D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n",
- def_strm.avail_in, def_strm.avail_out));
+ jffs2_dbg(1, "calling deflate with avail_in %d, avail_out %d\n",
+ def_strm.avail_in, def_strm.avail_out);
ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH);
- D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
- def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out));
+ jffs2_dbg(1, "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
+ def_strm.avail_in, def_strm.avail_out,
+ def_strm.total_in, def_strm.total_out);
if (ret != Z_OK) {
- D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
+ jffs2_dbg(1, "deflate in loop returned %d\n", ret);
zlib_deflateEnd(&def_strm);
mutex_unlock(&deflate_mutex);
return -1;
@@ -111,20 +114,20 @@ static int jffs2_zlib_compress(unsigned char *data_in,
zlib_deflateEnd(&def_strm);
if (ret != Z_STREAM_END) {
- D1(printk(KERN_DEBUG "final deflate returned %d\n", ret));
+ jffs2_dbg(1, "final deflate returned %d\n", ret);
ret = -1;
goto out;
}
if (def_strm.total_out >= def_strm.total_in) {
- D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld; failing\n",
- def_strm.total_in, def_strm.total_out));
+ jffs2_dbg(1, "zlib compressed %ld bytes into %ld; failing\n",
+ def_strm.total_in, def_strm.total_out);
ret = -1;
goto out;
}
- D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n",
- def_strm.total_in, def_strm.total_out));
+ jffs2_dbg(1, "zlib compressed %ld bytes into %ld\n",
+ def_strm.total_in, def_strm.total_out);
*dstlen = def_strm.total_out;
*sourcelen = def_strm.total_in;
@@ -157,18 +160,18 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
((data_in[0] & 0x0f) == Z_DEFLATED) &&
!(((data_in[0]<<8) + data_in[1]) % 31)) {
- D2(printk(KERN_DEBUG "inflate skipping adler32\n"));
+ jffs2_dbg(2, "inflate skipping adler32\n");
wbits = -((data_in[0] >> 4) + 8);
inf_strm.next_in += 2;
inf_strm.avail_in -= 2;
} else {
/* Let this remain D1 for now -- it should never happen */
- D1(printk(KERN_DEBUG "inflate not skipping adler32\n"));
+ jffs2_dbg(1, "inflate not skipping adler32\n");
}
if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
- printk(KERN_WARNING "inflateInit failed\n");
+ pr_warn("inflateInit failed\n");
mutex_unlock(&inflate_mutex);
return 1;
}
@@ -176,7 +179,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK)
;
if (ret != Z_STREAM_END) {
- printk(KERN_NOTICE "inflate returned %d\n", ret);
+ pr_notice("inflate returned %d\n", ret);
}
zlib_inflateEnd(&inf_strm);
mutex_unlock(&inflate_mutex);
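
[Editor's note] In compr_zlib.c the allocation-failure printk()s are dropped (the allocator already complains) and the debug output moves to jffs2_dbg(), but the workspace set-up itself is untouched. For reference, a consolidated sketch of that set-up, using only the in-kernel zlib calls visible in the hunks (zlib_deflate_workspacesize(), zlib_inflate_workspacesize(), vmalloc()); free_workspaces() is an assumed counterpart:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zlib.h>
#include <linux/zutil.h>	/* MAX_WBITS, MAX_MEM_LEVEL */

static z_stream def_strm, inf_strm;

static int __init alloc_workspaces(void)
{
	/* zlib needs preallocated workspaces in the kernel; size them once. */
	def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
								MAX_MEM_LEVEL));
	if (!def_strm.workspace)
		return -ENOMEM;

	inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
	if (!inf_strm.workspace) {
		vfree(def_strm.workspace);
		return -ENOMEM;
	}
	return 0;
}

static void free_workspaces(void)
{
	vfree(def_strm.workspace);
	vfree(inf_strm.workspace);
}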
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index e0b76c87a91a..1090eb64b90d 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pagemap.h>
@@ -261,12 +263,15 @@ void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c)
bad += c->sector_size;
}
-#define check(sz) \
- if (sz != c->sz##_size) { \
- printk(KERN_WARNING #sz "_size mismatch counted 0x%x, c->" #sz "_size 0x%x\n", \
- sz, c->sz##_size); \
- dump = 1; \
- }
+#define check(sz) \
+do { \
+ if (sz != c->sz##_size) { \
+ pr_warn("%s_size mismatch counted 0x%x, c->%s_size 0x%x\n", \
+ #sz, sz, #sz, c->sz##_size); \
+ dump = 1; \
+ } \
+} while (0)
+
check(free);
check(dirty);
check(used);
@@ -274,11 +279,12 @@ void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c)
check(unchecked);
check(bad);
check(erasing);
+
#undef check
if (nr_counted != c->nr_blocks) {
- printk(KERN_WARNING "%s counted only 0x%x blocks of 0x%x. Where are the others?\n",
- __func__, nr_counted, c->nr_blocks);
+ pr_warn("%s counted only 0x%x blocks of 0x%x. Where are the others?\n",
+ __func__, nr_counted, c->nr_blocks);
dump = 1;
}
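
[Editor's note] The debug.c hunk rewrites the multi-statement check() macro as a do { ... } while (0) block. The wrapper is what lets the macro be used like an ordinary statement; with a bare braced block, the caller's trailing semicolon becomes an empty statement and a following else has nothing to pair with, so the code fails to compile. A generic illustration (names here are not the jffs2 macro):

#include <linux/printk.h>

/* Unsafe: "if (x) CHECK_UNSAFE(y); else ..." leaves the else orphaned. */
#define CHECK_UNSAFE(cond)	{ if (!(cond)) pr_warn("check failed\n"); }

/* Safe: behaves as a single statement wherever it is placed. */
#define CHECK(cond)				\
do {						\
	if (!(cond))				\
		pr_warn("check failed\n");	\
} while (0)

static void example(int a, int b)
{
	if (a > b)
		CHECK(a != 0);		/* fine inside an un-braced if/else */
	else
		pr_info("a <= b\n");
}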
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h
index c4f8eef5ca68..4fd9be4cbc98 100644
--- a/fs/jffs2/debug.h
+++ b/fs/jffs2/debug.h
@@ -51,6 +51,7 @@
* superseded by nicer dbg_xxx() macros...
*/
#if CONFIG_JFFS2_FS_DEBUG > 0
+#define DEBUG
#define D1(x) x
#else
#define D1(x)
@@ -62,50 +63,33 @@
#define D2(x)
#endif
+#define jffs2_dbg(level, fmt, ...) \
+do { \
+ if (CONFIG_JFFS2_FS_DEBUG >= level) \
+ pr_debug(fmt, ##__VA_ARGS__); \
+} while (0)
+
/* The prefixes of JFFS2 messages */
+#define JFFS2_DBG KERN_DEBUG
#define JFFS2_DBG_PREFIX "[JFFS2 DBG]"
-#define JFFS2_ERR_PREFIX "JFFS2 error:"
-#define JFFS2_WARN_PREFIX "JFFS2 warning:"
-#define JFFS2_NOTICE_PREFIX "JFFS2 notice:"
-
-#define JFFS2_ERR KERN_ERR
-#define JFFS2_WARN KERN_WARNING
-#define JFFS2_NOT KERN_NOTICE
-#define JFFS2_DBG KERN_DEBUG
-
#define JFFS2_DBG_MSG_PREFIX JFFS2_DBG JFFS2_DBG_PREFIX
-#define JFFS2_ERR_MSG_PREFIX JFFS2_ERR JFFS2_ERR_PREFIX
-#define JFFS2_WARN_MSG_PREFIX JFFS2_WARN JFFS2_WARN_PREFIX
-#define JFFS2_NOTICE_MSG_PREFIX JFFS2_NOT JFFS2_NOTICE_PREFIX
/* JFFS2 message macros */
-#define JFFS2_ERROR(fmt, ...) \
- do { \
- printk(JFFS2_ERR_MSG_PREFIX \
- " (%d) %s: " fmt, task_pid_nr(current), \
- __func__ , ##__VA_ARGS__); \
- } while(0)
+#define JFFS2_ERROR(fmt, ...) \
+ pr_err("error: (%d) %s: " fmt, \
+ task_pid_nr(current), __func__, ##__VA_ARGS__)
#define JFFS2_WARNING(fmt, ...) \
- do { \
- printk(JFFS2_WARN_MSG_PREFIX \
- " (%d) %s: " fmt, task_pid_nr(current), \
- __func__ , ##__VA_ARGS__); \
- } while(0)
+ pr_warn("warning: (%d) %s: " fmt, \
+ task_pid_nr(current), __func__, ##__VA_ARGS__)
#define JFFS2_NOTICE(fmt, ...) \
- do { \
- printk(JFFS2_NOTICE_MSG_PREFIX \
- " (%d) %s: " fmt, task_pid_nr(current), \
- __func__ , ##__VA_ARGS__); \
- } while(0)
+ pr_notice("notice: (%d) %s: " fmt, \
+ task_pid_nr(current), __func__, ##__VA_ARGS__)
#define JFFS2_DEBUG(fmt, ...) \
- do { \
- printk(JFFS2_DBG_MSG_PREFIX \
- " (%d) %s: " fmt, task_pid_nr(current), \
- __func__ , ##__VA_ARGS__); \
- } while(0)
+ printk(KERN_DEBUG "[JFFS2 DBG] (%d) %s: " fmt, \
+ task_pid_nr(current), __func__, ##__VA_ARGS__)
/*
* We split our debugging messages on several parts, depending on the JFFS2
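
[Editor's note] The debug.h hunk above collapses the old JFFS2_ERR/JFFS2_WARN prefix machinery into pr_err()/pr_warn()/pr_notice() and adds the level-gated jffs2_dbg() built on pr_debug(), defining DEBUG whenever CONFIG_JFFS2_FS_DEBUG is non-zero. A minimal standalone sketch of the same pattern with generic names; it assumes the usual printk.h semantics, where pr_debug() only emits if DEBUG is visible before <linux/printk.h> is processed (or dynamic debug is enabled):

/* Compile-time verbosity, analogous to CONFIG_JFFS2_FS_DEBUG. */
#define MYFS_DEBUG_LEVEL 1

#if MYFS_DEBUG_LEVEL > 0
#define DEBUG			/* turn pr_debug() into real output */
#endif

#include <linux/printk.h>

#define myfs_dbg(level, fmt, ...)			\
do {							\
	if (MYFS_DEBUG_LEVEL >= (level))		\
		pr_debug(fmt, ##__VA_ARGS__);		\
} while (0)

/* Usage, mirroring the D1()/D2() conversions in the hunks that follow:
 *	myfs_dbg(1, "%s(): sleeping...\n", __func__);
 * compiles to nothing at level 0 and to a pr_debug() call (prefixed via
 * pr_fmt) at level 1 or above. */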
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 973ac5822bd7..b56018896d5e 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
@@ -79,7 +81,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
uint32_t ino = 0;
struct inode *inode = NULL;
- D1(printk(KERN_DEBUG "jffs2_lookup()\n"));
+ jffs2_dbg(1, "jffs2_lookup()\n");
if (target->d_name.len > JFFS2_MAX_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
@@ -103,7 +105,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
if (ino) {
inode = jffs2_iget(dir_i->i_sb, ino);
if (IS_ERR(inode))
- printk(KERN_WARNING "iget() failed for ino #%u\n", ino);
+ pr_warn("iget() failed for ino #%u\n", ino);
}
return d_splice_alias(inode, target);
@@ -119,21 +121,22 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
struct jffs2_full_dirent *fd;
unsigned long offset, curofs;
- D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_path.dentry->d_inode->i_ino));
+ jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n",
+ filp->f_path.dentry->d_inode->i_ino);
f = JFFS2_INODE_INFO(inode);
offset = filp->f_pos;
if (offset == 0) {
- D1(printk(KERN_DEBUG "Dirent 0: \".\", ino #%lu\n", inode->i_ino));
+ jffs2_dbg(1, "Dirent 0: \".\", ino #%lu\n", inode->i_ino);
if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
goto out;
offset++;
}
if (offset == 1) {
unsigned long pino = parent_ino(filp->f_path.dentry);
- D1(printk(KERN_DEBUG "Dirent 1: \"..\", ino #%lu\n", pino));
+ jffs2_dbg(1, "Dirent 1: \"..\", ino #%lu\n", pino);
if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0)
goto out;
offset++;
@@ -146,16 +149,18 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
curofs++;
/* First loop: curofs = 2; offset = 2 */
if (curofs < offset) {
- D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
- fd->name, fd->ino, fd->type, curofs, offset));
+ jffs2_dbg(2, "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
+ fd->name, fd->ino, fd->type, curofs, offset);
continue;
}
if (!fd->ino) {
- D2(printk(KERN_DEBUG "Skipping deletion dirent \"%s\"\n", fd->name));
+ jffs2_dbg(2, "Skipping deletion dirent \"%s\"\n",
+ fd->name);
offset++;
continue;
}
- D2(printk(KERN_DEBUG "Dirent %ld: \"%s\", ino #%u, type %d\n", offset, fd->name, fd->ino, fd->type));
+ jffs2_dbg(2, "Dirent %ld: \"%s\", ino #%u, type %d\n",
+ offset, fd->name, fd->ino, fd->type);
if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0)
break;
offset++;
@@ -184,12 +189,12 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
c = JFFS2_SB_INFO(dir_i->i_sb);
- D1(printk(KERN_DEBUG "jffs2_create()\n"));
+ jffs2_dbg(1, "%s()\n", __func__);
inode = jffs2_new_inode(dir_i, mode, ri);
if (IS_ERR(inode)) {
- D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n"));
+ jffs2_dbg(1, "jffs2_new_inode() failed\n");
jffs2_free_raw_inode(ri);
return PTR_ERR(inode);
}
@@ -217,9 +222,9 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
jffs2_free_raw_inode(ri);
- D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
- inode->i_ino, inode->i_mode, inode->i_nlink,
- f->inocache->pino_nlink, inode->i_mapping->nrpages));
+ jffs2_dbg(1, "%s(): Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
+ __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
+ f->inocache->pino_nlink, inode->i_mapping->nrpages);
d_instantiate(dentry, inode);
unlock_new_inode(inode);
@@ -362,14 +367,15 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
/* We use f->target field to store the target path. */
f->target = kmemdup(target, targetlen + 1, GFP_KERNEL);
if (!f->target) {
- printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1);
+ pr_warn("Can't allocate %d bytes of memory\n", targetlen + 1);
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
ret = -ENOMEM;
goto fail;
}
- D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->target));
+ jffs2_dbg(1, "%s(): symlink's target '%s' cached\n",
+ __func__, (char *)f->target);
/* No data here. Only a metadata node, which will be
obsoleted by the first data write
@@ -856,7 +862,8 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
f->inocache->pino_nlink++;
mutex_unlock(&f->sem);
- printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret);
+ pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
+ __func__, ret);
/* Might as well let the VFS know */
d_instantiate(new_dentry, old_dentry->d_inode);
ihold(old_dentry->d_inode);
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index eafb8d37a6fb..4a6cf289be24 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
@@ -46,11 +48,12 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
#else /* Linux */
struct erase_info *instr;
- D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n",
- jeb->offset, jeb->offset, jeb->offset + c->sector_size));
+ jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
+ __func__,
+ jeb->offset, jeb->offset, jeb->offset + c->sector_size);
instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
if (!instr) {
- printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
+ pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
@@ -69,7 +72,6 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
instr->len = c->sector_size;
instr->callback = jffs2_erase_callback;
instr->priv = (unsigned long)(&instr[1]);
- instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
((struct erase_priv_struct *)instr->priv)->jeb = jeb;
((struct erase_priv_struct *)instr->priv)->c = c;
@@ -84,7 +86,8 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
if (ret == -ENOMEM || ret == -EAGAIN) {
/* Erase failed immediately. Refile it on the list */
- D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
+ jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n",
+ jeb->offset, ret);
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
@@ -97,9 +100,11 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
}
if (ret == -EROFS)
- printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
+ pr_warn("Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n",
+ jeb->offset);
else
- printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);
+ pr_warn("Erase at 0x%08x failed immediately: errno %d\n",
+ jeb->offset, ret);
jffs2_erase_failed(c, jeb, bad_offset);
}
@@ -125,13 +130,14 @@ int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
work_done++;
if (!--count) {
- D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n"));
+ jffs2_dbg(1, "Count reached. jffs2_erase_pending_blocks leaving\n");
goto done;
}
} else if (!list_empty(&c->erase_pending_list)) {
jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
- D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset));
+ jffs2_dbg(1, "Starting erase of pending block 0x%08x\n",
+ jeb->offset);
list_del(&jeb->list);
c->erasing_size += c->sector_size;
c->wasted_size -= jeb->wasted_size;
@@ -159,13 +165,13 @@ int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
done:
- D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
+ jffs2_dbg(1, "jffs2_erase_pending_blocks completed\n");
return work_done;
}
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
- D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
+ jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", jeb->offset);
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move_tail(&jeb->list, &c->erase_complete_list);
@@ -214,7 +220,7 @@ static void jffs2_erase_callback(struct erase_info *instr)
struct erase_priv_struct *priv = (void *)instr->priv;
if(instr->state != MTD_ERASE_DONE) {
- printk(KERN_WARNING "Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n",
+ pr_warn("Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n",
(unsigned long long)instr->addr, instr->state);
jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
} else {
@@ -269,8 +275,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
return;
}
- D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
- jeb->offset, jeb->offset + c->sector_size, ic->ino));
+ jffs2_dbg(1, "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
+ jeb->offset, jeb->offset + c->sector_size, ic->ino);
D2({
int i=0;
@@ -281,7 +287,7 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
printk(KERN_DEBUG);
while(this) {
- printk(KERN_CONT "0x%08x(%d)->",
+ pr_cont("0x%08x(%d)->",
ref_offset(this), ref_flags(this));
if (++i == 5) {
printk(KERN_DEBUG);
@@ -289,7 +295,7 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
}
this = this->next_in_ino;
}
- printk(KERN_CONT "\n");
+ pr_cont("\n");
});
switch (ic->class) {
@@ -310,7 +316,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
struct jffs2_raw_node_ref *block, *ref;
- D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset));
+ jffs2_dbg(1, "Freeing all node refs for eraseblock offset 0x%08x\n",
+ jeb->offset);
block = ref = jeb->first_node;
@@ -342,12 +349,13 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
&ebuf, NULL);
if (ret != -EOPNOTSUPP) {
if (ret) {
- D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
+ jffs2_dbg(1, "MTD point failed %d\n", ret);
goto do_flash_read;
}
if (retlen < c->sector_size) {
/* Don't muck about if it won't let us point to the whole erase sector */
- D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen));
+ jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
+ retlen);
mtd_unpoint(c->mtd, jeb->offset, retlen);
goto do_flash_read;
}
@@ -359,8 +367,10 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
} while(--retlen);
mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
if (retlen) {
- printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
- *wordebuf, jeb->offset + c->sector_size-retlen*sizeof(*wordebuf));
+ pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
+ *wordebuf,
+ jeb->offset +
+ c->sector_size-retlen * sizeof(*wordebuf));
return -EIO;
}
return 0;
@@ -368,11 +378,12 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
do_flash_read:
ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!ebuf) {
- printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset);
+ pr_warn("Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n",
+ jeb->offset);
return -EAGAIN;
}
- D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
+ jffs2_dbg(1, "Verifying erase at 0x%08x\n", jeb->offset);
for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
@@ -382,12 +393,14 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf);
if (ret) {
- printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
+ pr_warn("Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n",
+ ofs, ret);
ret = -EIO;
goto fail;
}
if (retlen != readlen) {
- printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
+ pr_warn("Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n",
+ ofs, readlen, retlen);
ret = -EIO;
goto fail;
}
@@ -396,7 +409,8 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
unsigned long *datum = ebuf + i;
if (*datum + 1) {
*bad_offset += i;
- printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset);
+ pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n",
+ *datum, *bad_offset);
ret = -EIO;
goto fail;
}
@@ -422,7 +436,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
}
/* Write the erase complete marker */
- D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
+ jffs2_dbg(1, "Writing erased marker to block at 0x%08x\n", jeb->offset);
bad_offset = jeb->offset;
/* Cleanmarker in oob area or no cleanmarker at all ? */
@@ -451,10 +465,10 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
if (ret || retlen != sizeof(marker)) {
if (ret)
- printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
+ pr_warn("Write clean marker to block at 0x%08x failed: %d\n",
jeb->offset, ret);
else
- printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
+ pr_warn("Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
jeb->offset, sizeof(marker), retlen);
goto filebad;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 61e6723535b9..db3889ba8818 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/time.h>
@@ -85,7 +87,8 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
unsigned char *pg_buf;
int ret;
- D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT));
+ jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
+ __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT);
BUG_ON(!PageLocked(pg));
@@ -105,7 +108,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
flush_dcache_page(pg);
kunmap(pg);
- D2(printk(KERN_DEBUG "readpage finished\n"));
+ jffs2_dbg(2, "readpage finished\n");
return ret;
}
@@ -144,7 +147,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
return -ENOMEM;
*pagep = pg;
- D1(printk(KERN_DEBUG "jffs2_write_begin()\n"));
+ jffs2_dbg(1, "%s()\n", __func__);
if (pageofs > inode->i_size) {
/* Make new hole frag from old EOF to new page */
@@ -153,8 +156,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
struct jffs2_full_dnode *fn;
uint32_t alloc_len;
- D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
- (unsigned int)inode->i_size, pageofs));
+ jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
+ (unsigned int)inode->i_size, pageofs);
ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
@@ -198,7 +201,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
f->metadata = NULL;
}
if (ret) {
- D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n", ret));
+ jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n",
+ ret);
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
jffs2_complete_reservation(c);
@@ -222,7 +226,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
if (ret)
goto out_page;
}
- D1(printk(KERN_DEBUG "end write_begin(). pg->flags %lx\n", pg->flags));
+ jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
return ret;
out_page:
@@ -248,8 +252,9 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
int ret = 0;
uint32_t writtenlen = 0;
- D1(printk(KERN_DEBUG "jffs2_write_end(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
- inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));
+ jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
+ __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT,
+ start, end, pg->flags);
/* We need to avoid deadlock with page_cache_read() in
jffs2_garbage_collect_pass(). So the page must be
@@ -268,7 +273,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
ri = jffs2_alloc_raw_inode();
if (!ri) {
- D1(printk(KERN_DEBUG "jffs2_write_end(): Allocation of raw inode failed\n"));
+ jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
+ __func__);
unlock_page(pg);
page_cache_release(pg);
return -ENOMEM;
@@ -315,13 +321,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
/* generic_file_write has written more to the page cache than we've
actually written to the medium. Mark the page !Uptodate so that
it gets reread */
- D1(printk(KERN_DEBUG "jffs2_write_end(): Not all bytes written. Marking page !uptodate\n"));
+ jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n",
+ __func__);
SetPageError(pg);
ClearPageUptodate(pg);
}
- D1(printk(KERN_DEBUG "jffs2_write_end() returning %d\n",
- writtenlen > 0 ? writtenlen : ret));
+ jffs2_dbg(1, "%s() returning %d\n",
+ __func__, writtenlen > 0 ? writtenlen : ret);
unlock_page(pg);
page_cache_release(pg);
return writtenlen > 0 ? writtenlen : ret;
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index c0d5c9d770da..bb6f993ebca9 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -39,7 +41,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
int ret;
int alloc_type = ALLOC_NORMAL;
- D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
+ jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);
/* Special cases - we don't want more than one data node
for these types on the medium at any time. So setattr
@@ -50,7 +52,8 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
/* For these, we don't actually need to read the old node */
mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
mdata = (char *)&dev;
- D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
+ jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
+ __func__, mdatalen);
} else if (S_ISLNK(inode->i_mode)) {
mutex_lock(&f->sem);
mdatalen = f->metadata->size;
@@ -66,7 +69,8 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
return ret;
}
mutex_unlock(&f->sem);
- D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
+ jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
+ __func__, mdatalen);
}
ri = jffs2_alloc_raw_inode();
@@ -233,7 +237,8 @@ void jffs2_evict_inode (struct inode *inode)
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
- D1(printk(KERN_DEBUG "jffs2_evict_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
+ jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
+ __func__, inode->i_ino, inode->i_mode);
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
jffs2_do_clear_inode(c, f);
@@ -249,7 +254,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
dev_t rdev = 0;
int ret;
- D1(printk(KERN_DEBUG "jffs2_iget(): ino == %lu\n", ino));
+ jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);
inode = iget_locked(sb, ino);
if (!inode)
@@ -317,14 +322,16 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
/* Read the device numbers from the media */
if (f->metadata->size != sizeof(jdev.old_id) &&
f->metadata->size != sizeof(jdev.new_id)) {
- printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size);
+ pr_notice("Device node has strange size %d\n",
+ f->metadata->size);
goto error_io;
}
- D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
+ jffs2_dbg(1, "Reading device numbers from flash\n");
ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
if (ret < 0) {
/* Eep */
- printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
+ pr_notice("Read device numbers for inode %lu failed\n",
+ (unsigned long)inode->i_ino);
goto error;
}
if (f->metadata->size == sizeof(jdev.old_id))
@@ -339,12 +346,13 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
break;
default:
- printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino);
+ pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
+ __func__, inode->i_mode, (unsigned long)inode->i_ino);
}
mutex_unlock(&f->sem);
- D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
+ jffs2_dbg(1, "jffs2_read_inode() returning\n");
unlock_new_inode(inode);
return inode;
@@ -362,11 +370,13 @@ void jffs2_dirty_inode(struct inode *inode, int flags)
struct iattr iattr;
if (!(inode->i_state & I_DIRTY_DATASYNC)) {
- D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino));
+ jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
+ __func__, inode->i_ino);
return;
}
- D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino));
+ jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
+ __func__, inode->i_ino);
iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
iattr.ia_mode = inode->i_mode;
@@ -414,7 +424,8 @@ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_r
struct jffs2_inode_info *f;
int ret;
- D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode));
+ jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
+ __func__, dir_i->i_ino, mode);
c = JFFS2_SB_INFO(sb);
@@ -504,11 +515,11 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
if (c->mtd->type == MTD_NANDFLASH) {
- printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n");
+ pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
return -EINVAL;
}
if (c->mtd->type == MTD_DATAFLASH) {
- printk(KERN_ERR "jffs2: Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in.\n");
+ pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
return -EINVAL;
}
#endif
@@ -522,12 +533,13 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
*/
if ((c->sector_size * blocks) != c->flash_size) {
c->flash_size = c->sector_size * blocks;
- printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n",
+ pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
c->flash_size / 1024);
}
if (c->flash_size < 5*c->sector_size) {
- printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size);
+ pr_err("Too few erase blocks (%d)\n",
+ c->flash_size / c->sector_size);
return -EINVAL;
}
@@ -550,17 +562,17 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
if ((ret = jffs2_do_mount_fs(c)))
goto out_inohash;
- D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n"));
+ jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
root_i = jffs2_iget(sb, 1);
if (IS_ERR(root_i)) {
- D1(printk(KERN_WARNING "get root inode failed\n"));
+ jffs2_dbg(1, "get root inode failed\n");
ret = PTR_ERR(root_i);
goto out_root;
}
ret = -ENOMEM;
- D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n"));
+ jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
sb->s_root = d_make_root(root_i);
if (!sb->s_root)
goto out_root;
@@ -618,20 +630,21 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
*/
inode = ilookup(OFNI_BS_2SFFJ(c), inum);
if (!inode) {
- D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n",
- inum));
+ jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
+ inum);
spin_lock(&c->inocache_lock);
ic = jffs2_get_ino_cache(c, inum);
if (!ic) {
- D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum));
+ jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
+ inum);
spin_unlock(&c->inocache_lock);
return NULL;
}
if (ic->state != INO_STATE_CHECKEDABSENT) {
/* Wait for progress. Don't just loop */
- D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n",
- ic->ino, ic->state));
+ jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
+ ic->ino, ic->state);
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
} else {
spin_unlock(&c->inocache_lock);
@@ -649,8 +662,8 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
return ERR_CAST(inode);
}
if (is_bad_inode(inode)) {
- printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. unlinked %d\n",
- inum, unlinked);
+ pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
+ inum, unlinked);
/* NB. This will happen again. We need to do something appropriate here. */
iput(inode);
return ERR_PTR(-EIO);
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 31dce611337c..ad271c70aa25 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
@@ -51,44 +53,44 @@ static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
number of free blocks is low. */
again:
if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
- D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n"));
+ jffs2_dbg(1, "Picking block from bad_used_list to GC next\n");
nextlist = &c->bad_used_list;
} else if (n < 50 && !list_empty(&c->erasable_list)) {
/* Note that most of them will have gone directly to be erased.
So don't favour the erasable_list _too_ much. */
- D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n"));
+ jffs2_dbg(1, "Picking block from erasable_list to GC next\n");
nextlist = &c->erasable_list;
} else if (n < 110 && !list_empty(&c->very_dirty_list)) {
/* Most of the time, pick one off the very_dirty list */
- D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n"));
+ jffs2_dbg(1, "Picking block from very_dirty_list to GC next\n");
nextlist = &c->very_dirty_list;
} else if (n < 126 && !list_empty(&c->dirty_list)) {
- D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n"));
+ jffs2_dbg(1, "Picking block from dirty_list to GC next\n");
nextlist = &c->dirty_list;
} else if (!list_empty(&c->clean_list)) {
- D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n"));
+ jffs2_dbg(1, "Picking block from clean_list to GC next\n");
nextlist = &c->clean_list;
} else if (!list_empty(&c->dirty_list)) {
- D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n"));
+ jffs2_dbg(1, "Picking block from dirty_list to GC next (clean_list was empty)\n");
nextlist = &c->dirty_list;
} else if (!list_empty(&c->very_dirty_list)) {
- D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"));
+ jffs2_dbg(1, "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n");
nextlist = &c->very_dirty_list;
} else if (!list_empty(&c->erasable_list)) {
- D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"));
+ jffs2_dbg(1, "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n");
nextlist = &c->erasable_list;
} else if (!list_empty(&c->erasable_pending_wbuf_list)) {
/* There are blocks waiting for the wbuf sync */
- D1(printk(KERN_DEBUG "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n"));
+ jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n");
spin_unlock(&c->erase_completion_lock);
jffs2_flush_wbuf_pad(c);
spin_lock(&c->erase_completion_lock);
goto again;
} else {
/* Eep. All were empty */
- D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"));
+ jffs2_dbg(1, "No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n");
return NULL;
}
@@ -97,13 +99,15 @@ again:
c->gcblock = ret;
ret->gc_node = ret->first_node;
if (!ret->gc_node) {
- printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset);
+ pr_warn("Eep. ret->gc_node for block at 0x%08x is NULL\n",
+ ret->offset);
BUG();
}
/* Have we accidentally picked a clean block with wasted space ? */
if (ret->wasted_size) {
- D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size));
+ jffs2_dbg(1, "Converting wasted_size %08x to dirty_size\n",
+ ret->wasted_size);
ret->dirty_size += ret->wasted_size;
c->wasted_size -= ret->wasted_size;
c->dirty_size += ret->wasted_size;
@@ -140,8 +144,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
/* checked_ino is protected by the alloc_sem */
if (c->checked_ino > c->highest_ino && xattr) {
- printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n",
- c->unchecked_size);
+ pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
+ c->unchecked_size);
jffs2_dbg_dump_block_lists_nolock(c);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
@@ -163,8 +167,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
}
if (!ic->pino_nlink) {
- D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink/pino zero\n",
- ic->ino));
+ jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n",
+ ic->ino);
spin_unlock(&c->inocache_lock);
jffs2_xattr_delete_inode(c, ic);
continue;
@@ -172,13 +176,15 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
switch(ic->state) {
case INO_STATE_CHECKEDABSENT:
case INO_STATE_PRESENT:
- D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino));
+ jffs2_dbg(1, "Skipping ino #%u already checked\n",
+ ic->ino);
spin_unlock(&c->inocache_lock);
continue;
case INO_STATE_GC:
case INO_STATE_CHECKING:
- printk(KERN_WARNING "Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state);
+ pr_warn("Inode #%u is in state %d during CRC check phase!\n",
+ ic->ino, ic->state);
spin_unlock(&c->inocache_lock);
BUG();
@@ -186,7 +192,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
/* We need to wait for it to finish, lest we move on
and trigger the BUG() above while we haven't yet
finished checking all its nodes */
- D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
+ jffs2_dbg(1, "Waiting for ino #%u to finish reading\n",
+ ic->ino);
/* We need to come back again for the _same_ inode. We've
made no progress in this case, but that should be OK */
c->checked_ino--;
@@ -204,11 +211,13 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
ic->state = INO_STATE_CHECKING;
spin_unlock(&c->inocache_lock);
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino));
+ jffs2_dbg(1, "%s(): triggering inode scan of ino#%u\n",
+ __func__, ic->ino);
ret = jffs2_do_crccheck_inode(c, ic);
if (ret)
- printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino);
+ pr_warn("Returned error for crccheck of ino #%u. Expect badness...\n",
+ ic->ino);
jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
mutex_unlock(&c->alloc_sem);
@@ -220,11 +229,11 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
!list_empty(&c->erase_pending_list)) {
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() erasing pending blocks\n"));
+ jffs2_dbg(1, "%s(): erasing pending blocks\n", __func__);
if (jffs2_erase_pending_blocks(c, 1))
return 0;
- D1(printk(KERN_DEBUG "No progress from erasing blocks; doing GC anyway\n"));
+ jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n");
spin_lock(&c->erase_completion_lock);
mutex_lock(&c->alloc_sem);
}
@@ -242,13 +251,14 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
mutex_unlock(&c->alloc_sem);
return -EAGAIN;
}
- D1(printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
+ jffs2_dbg(1, "Couldn't find erase block to garbage collect!\n");
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
return -EIO;
}
- D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size));
+ jffs2_dbg(1, "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size);
D1(if (c->nextblock)
printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));
@@ -261,12 +271,14 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
gcblock_dirty = jeb->dirty_size;
while(ref_obsolete(raw)) {
- D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
+ jffs2_dbg(1, "Node at 0x%08x is obsolete... skipping\n",
+ ref_offset(raw));
raw = ref_next(raw);
if (unlikely(!raw)) {
- printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n");
- printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
- jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
+ pr_warn("eep. End of raw list while still supposedly nodes to GC\n");
+ pr_warn("erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
+ jeb->offset, jeb->free_size,
+ jeb->dirty_size, jeb->used_size);
jeb->gc_node = raw;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
@@ -275,7 +287,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
}
jeb->gc_node = raw;
- D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw)));
+ jffs2_dbg(1, "Going to garbage collect node at 0x%08x\n",
+ ref_offset(raw));
if (!raw->next_in_ino) {
/* Inode-less node. Clean marker, snapshot or something like that */
@@ -316,7 +329,9 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
spin_unlock(&c->erase_completion_lock);
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino));
+ jffs2_dbg(1, "%s(): collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n",
+ __func__, jeb->offset, ref_offset(raw), ref_flags(raw),
+ ic->ino);
/* Three possibilities:
1. Inode is already in-core. We must iget it and do proper
@@ -336,8 +351,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
if (ref_flags(raw) == REF_PRISTINE)
ic->state = INO_STATE_GC;
else {
- D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
- ic->ino));
+ jffs2_dbg(1, "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
+ ic->ino);
}
break;
@@ -353,8 +368,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
we're holding the alloc_sem, no other garbage collection
can happen.
*/
- printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
- ic->ino, ic->state);
+ pr_crit("Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
+ ic->ino, ic->state);
mutex_unlock(&c->alloc_sem);
spin_unlock(&c->inocache_lock);
BUG();
@@ -367,8 +382,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
drop the alloc_sem before sleeping. */
mutex_unlock(&c->alloc_sem);
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
- ic->ino, ic->state));
+ jffs2_dbg(1, "%s(): waiting for ino #%u in state %d\n",
+ __func__, ic->ino, ic->state);
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
/* And because we dropped the alloc_sem we must start again from the
beginning. Ponder chance of livelock here -- we're returning success
@@ -433,7 +448,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
test_gcnode:
if (jeb->dirty_size == gcblock_dirty && !ref_obsolete(jeb->gc_node)) {
/* Eep. This really should never happen. GC is broken */
- printk(KERN_ERR "Error garbage collecting node at %08x!\n", ref_offset(jeb->gc_node));
+ pr_err("Error garbage collecting node at %08x!\n",
+ ref_offset(jeb->gc_node));
ret = -ENOSPC;
}
release_sem:
@@ -445,7 +461,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
eraseit:
if (c->gcblock && !c->gcblock->used_size) {
- D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset));
+ jffs2_dbg(1, "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n",
+ c->gcblock->offset);
/* We're GC'ing an empty block? */
list_add_tail(&c->gcblock->list, &c->erase_pending_list);
c->gcblock = NULL;
@@ -475,12 +492,12 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
if (c->gcblock != jeb) {
spin_unlock(&c->erase_completion_lock);
- D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n"));
+ jffs2_dbg(1, "GC block is no longer gcblock. Restart\n");
goto upnout;
}
if (ref_obsolete(raw)) {
spin_unlock(&c->erase_completion_lock);
- D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n"));
+ jffs2_dbg(1, "node to be GC'd was obsoleted in the meantime.\n");
/* They'll call again */
goto upnout;
}
@@ -536,10 +553,10 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
} else if (fd) {
ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd);
} else {
- printk(KERN_WARNING "Raw node at 0x%08x wasn't in node lists for ino #%u\n",
- ref_offset(raw), f->inocache->ino);
+ pr_warn("Raw node at 0x%08x wasn't in node lists for ino #%u\n",
+ ref_offset(raw), f->inocache->ino);
if (ref_obsolete(raw)) {
- printk(KERN_WARNING "But it's obsolete so we don't mind too much\n");
+ pr_warn("But it's obsolete so we don't mind too much\n");
} else {
jffs2_dbg_dump_node(c, ref_offset(raw));
BUG();
@@ -562,7 +579,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
uint32_t crc, rawlen;
int retried = 0;
- D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw)));
+ jffs2_dbg(1, "Going to GC REF_PRISTINE node at 0x%08x\n",
+ ref_offset(raw));
alloclen = rawlen = ref_totlen(c, c->gcblock, raw);
@@ -595,8 +613,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4);
if (je32_to_cpu(node->u.hdr_crc) != crc) {
- printk(KERN_WARNING "Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
+ pr_warn("Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
goto bail;
}
@@ -604,16 +622,18 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
case JFFS2_NODETYPE_INODE:
crc = crc32(0, node, sizeof(node->i)-8);
if (je32_to_cpu(node->i.node_crc) != crc) {
- printk(KERN_WARNING "Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- ref_offset(raw), je32_to_cpu(node->i.node_crc), crc);
+ pr_warn("Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ ref_offset(raw), je32_to_cpu(node->i.node_crc),
+ crc);
goto bail;
}
if (je32_to_cpu(node->i.dsize)) {
crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize));
if (je32_to_cpu(node->i.data_crc) != crc) {
- printk(KERN_WARNING "Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- ref_offset(raw), je32_to_cpu(node->i.data_crc), crc);
+ pr_warn("Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ ref_offset(raw),
+ je32_to_cpu(node->i.data_crc), crc);
goto bail;
}
}
@@ -622,21 +642,24 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
case JFFS2_NODETYPE_DIRENT:
crc = crc32(0, node, sizeof(node->d)-8);
if (je32_to_cpu(node->d.node_crc) != crc) {
- printk(KERN_WARNING "Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- ref_offset(raw), je32_to_cpu(node->d.node_crc), crc);
+ pr_warn("Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ ref_offset(raw),
+ je32_to_cpu(node->d.node_crc), crc);
goto bail;
}
if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) {
- printk(KERN_WARNING "Name in dirent node at 0x%08x contains zeroes\n", ref_offset(raw));
+ pr_warn("Name in dirent node at 0x%08x contains zeroes\n",
+ ref_offset(raw));
goto bail;
}
if (node->d.nsize) {
crc = crc32(0, node->d.name, node->d.nsize);
if (je32_to_cpu(node->d.name_crc) != crc) {
- printk(KERN_WARNING "Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- ref_offset(raw), je32_to_cpu(node->d.name_crc), crc);
+ pr_warn("Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ ref_offset(raw),
+ je32_to_cpu(node->d.name_crc), crc);
goto bail;
}
}
@@ -644,8 +667,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
default:
/* If it's inode-less, we don't _know_ what it is. Just copy it intact */
if (ic) {
- printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
- ref_offset(raw), je16_to_cpu(node->u.nodetype));
+ pr_warn("Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
+ ref_offset(raw), je16_to_cpu(node->u.nodetype));
goto bail;
}
}
@@ -657,12 +680,13 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
if (ret || (retlen != rawlen)) {
- printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
- rawlen, phys_ofs, ret, retlen);
+ pr_notice("Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
+ rawlen, phys_ofs, ret, retlen);
if (retlen) {
jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL);
} else {
- printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", phys_ofs);
+ pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
+ phys_ofs);
}
if (!retried) {
/* Try to reallocate space and retry */
@@ -671,7 +695,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
retried = 1;
- D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n"));
+ jffs2_dbg(1, "Retrying failed write of REF_PRISTINE node.\n");
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
@@ -681,14 +705,16 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
it is only an upper estimation */
if (!ret) {
- D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs));
+ jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n",
+ phys_ofs);
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
goto retry;
}
- D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+ jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
+ ret);
}
if (!ret)
@@ -698,7 +724,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic);
jffs2_mark_node_obsolete(c, raw);
- D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw)));
+ jffs2_dbg(1, "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n",
+ ref_offset(raw));
out_node:
kfree(node);
@@ -725,29 +752,32 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
/* For these, we don't actually need to read the old node */
mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f));
mdata = (char *)&dev;
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen));
+ jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
+ __func__, mdatalen);
} else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
mdatalen = fn->size;
mdata = kmalloc(fn->size, GFP_KERNEL);
if (!mdata) {
- printk(KERN_WARNING "kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
+ pr_warn("kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
return -ENOMEM;
}
ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
if (ret) {
- printk(KERN_WARNING "read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret);
+ pr_warn("read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n",
+ ret);
kfree(mdata);
return ret;
}
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bites of symlink target\n", mdatalen));
+ jffs2_dbg(1, "%s(): Writing %d bites of symlink target\n",
+ __func__, mdatalen);
}
ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen,
JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
- printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
- sizeof(ri)+ mdatalen, ret);
+ pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
+ sizeof(ri) + mdatalen, ret);
goto out;
}
@@ -784,7 +814,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC);
if (IS_ERR(new_fn)) {
- printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
+ pr_warn("Error writing new dnode: %ld\n", PTR_ERR(new_fn));
ret = PTR_ERR(new_fn);
goto out;
}
@@ -827,14 +857,15 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er
ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen,
JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize));
if (ret) {
- printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
- sizeof(rd)+rd.nsize, ret);
+ pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
+ sizeof(rd)+rd.nsize, ret);
return ret;
}
new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC);
if (IS_ERR(new_fd)) {
- printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd));
+ pr_warn("jffs2_write_dirent in garbage_collect_dirent failed: %ld\n",
+ PTR_ERR(new_fd));
return PTR_ERR(new_fd);
}
jffs2_add_fd_to_list(c, new_fd, &f->dents);
@@ -887,19 +918,22 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset))
continue;
- D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw)));
+ jffs2_dbg(1, "Check potential deletion dirent at %08x\n",
+ ref_offset(raw));
/* This is an obsolete node belonging to the same directory, and it's of the right
length. We need to take a closer look...*/
ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
if (ret) {
- printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading obsolete node at %08x\n", ret, ref_offset(raw));
+ pr_warn("%s(): Read error (%d) reading obsolete node at %08x\n",
+ __func__, ret, ref_offset(raw));
/* If we can't read it, we don't need to continue to obsolete it. Continue */
continue;
}
if (retlen != rawlen) {
- printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
- retlen, rawlen, ref_offset(raw));
+ pr_warn("%s(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
+ __func__, retlen, rawlen,
+ ref_offset(raw));
continue;
}
@@ -923,8 +957,9 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
a new deletion dirent to replace it */
mutex_unlock(&c->erase_free_sem);
- D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
- ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
+ jffs2_dbg(1, "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
+ ref_offset(fd->raw), fd->name,
+ ref_offset(raw), je32_to_cpu(rd->ino));
kfree(rd);
return jffs2_garbage_collect_dirent(c, jeb, f, fd);
@@ -947,7 +982,8 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
fdp = &(*fdp)->next;
}
if (!found) {
- printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino);
+ pr_warn("Deletion dirent \"%s\" not found in list for ino #%u\n",
+ fd->name, f->inocache->ino);
}
jffs2_mark_node_obsolete(c, fd->raw);
jffs2_free_full_dirent(fd);
@@ -964,8 +1000,8 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
uint32_t alloclen, ilen;
int ret;
- D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
- f->inocache->ino, start, end));
+ jffs2_dbg(1, "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
+ f->inocache->ino, start, end);
memset(&ri, 0, sizeof(ri));
@@ -976,35 +1012,37 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
write it out again with the _same_ version as before */
ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
if (readlen != sizeof(ri) || ret) {
- printk(KERN_WARNING "Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", ret, readlen);
+ pr_warn("Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n",
+ ret, readlen);
goto fill;
}
if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) {
- printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
- ref_offset(fn->raw),
- je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
+ pr_warn("%s(): Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
+ __func__, ref_offset(fn->raw),
+ je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
return -EIO;
}
if (je32_to_cpu(ri.totlen) != sizeof(ri)) {
- printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
- ref_offset(fn->raw),
- je32_to_cpu(ri.totlen), sizeof(ri));
+ pr_warn("%s(): Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
+ __func__, ref_offset(fn->raw),
+ je32_to_cpu(ri.totlen), sizeof(ri));
return -EIO;
}
crc = crc32(0, &ri, sizeof(ri)-8);
if (crc != je32_to_cpu(ri.node_crc)) {
- printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
- ref_offset(fn->raw),
- je32_to_cpu(ri.node_crc), crc);
+ pr_warn("%s: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
+ __func__, ref_offset(fn->raw),
+ je32_to_cpu(ri.node_crc), crc);
/* FIXME: We could possibly deal with this by writing new holes for each frag */
- printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
- start, end, f->inocache->ino);
+ pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
+ start, end, f->inocache->ino);
goto fill;
}
if (ri.compr != JFFS2_COMPR_ZERO) {
- printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw));
- printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
- start, end, f->inocache->ino);
+ pr_warn("%s(): Node 0x%08x wasn't a hole node!\n",
+ __func__, ref_offset(fn->raw));
+ pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
+ start, end, f->inocache->ino);
goto fill;
}
} else {
@@ -1043,14 +1081,14 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen,
JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
- printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
- sizeof(ri), ret);
+ pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
+ sizeof(ri), ret);
return ret;
}
new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC);
if (IS_ERR(new_fn)) {
- printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn));
+ pr_warn("Error writing new hole node: %ld\n", PTR_ERR(new_fn));
return PTR_ERR(new_fn);
}
if (je32_to_cpu(ri.version) == f->highest_version) {
@@ -1070,9 +1108,9 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
* above.)
*/
D1(if(unlikely(fn->frags <= 1)) {
- printk(KERN_WARNING "jffs2_garbage_collect_hole: Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
- fn->frags, je32_to_cpu(ri.version), f->highest_version,
- je32_to_cpu(ri.ino));
+ pr_warn("%s(): Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
+ __func__, fn->frags, je32_to_cpu(ri.version),
+ f->highest_version, je32_to_cpu(ri.ino));
});
/* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
@@ -1089,11 +1127,11 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
}
}
if (fn->frags) {
- printk(KERN_WARNING "jffs2_garbage_collect_hole: Old node still has frags!\n");
+ pr_warn("%s(): Old node still has frags!\n", __func__);
BUG();
}
if (!new_fn->frags) {
- printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n");
+ pr_warn("%s(): New node has no frags!\n", __func__);
BUG();
}
@@ -1117,8 +1155,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
memset(&ri, 0, sizeof(ri));
- D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
- f->inocache->ino, start, end));
+ jffs2_dbg(1, "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
+ f->inocache->ino, start, end);
orig_end = end;
orig_start = start;
@@ -1149,15 +1187,15 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
/* If the previous frag doesn't even reach the beginning, there's
excessive fragmentation. Just merge. */
if (frag->ofs > min) {
- D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n",
- frag->ofs, frag->ofs+frag->size));
+ jffs2_dbg(1, "Expanding down to cover partial frag (0x%x-0x%x)\n",
+ frag->ofs, frag->ofs+frag->size);
start = frag->ofs;
continue;
}
/* OK. This frag holds the first byte of the page. */
if (!frag->node || !frag->node->raw) {
- D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
- frag->ofs, frag->ofs+frag->size));
+ jffs2_dbg(1, "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
+ frag->ofs, frag->ofs+frag->size);
break;
} else {
@@ -1171,19 +1209,25 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
jeb = &c->blocks[raw->flash_offset / c->sector_size];
if (jeb == c->gcblock) {
- D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
- frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
+ jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
+ frag->ofs,
+ frag->ofs + frag->size,
+ ref_offset(raw));
start = frag->ofs;
break;
}
if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
- D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
- frag->ofs, frag->ofs+frag->size, jeb->offset));
+ jffs2_dbg(1, "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
+ frag->ofs,
+ frag->ofs + frag->size,
+ jeb->offset);
break;
}
- D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
- frag->ofs, frag->ofs+frag->size, jeb->offset));
+ jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
+ frag->ofs,
+ frag->ofs + frag->size,
+ jeb->offset);
start = frag->ofs;
break;
}
@@ -1199,15 +1243,15 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
/* If the previous frag doesn't even reach the beginning, there's lots
of fragmentation. Just merge. */
if (frag->ofs+frag->size < max) {
- D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n",
- frag->ofs, frag->ofs+frag->size));
+ jffs2_dbg(1, "Expanding up to cover partial frag (0x%x-0x%x)\n",
+ frag->ofs, frag->ofs+frag->size);
end = frag->ofs + frag->size;
continue;
}
if (!frag->node || !frag->node->raw) {
- D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
- frag->ofs, frag->ofs+frag->size));
+ jffs2_dbg(1, "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
+ frag->ofs, frag->ofs+frag->size);
break;
} else {
@@ -1221,25 +1265,31 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
jeb = &c->blocks[raw->flash_offset / c->sector_size];
if (jeb == c->gcblock) {
- D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
- frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
+ jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
+ frag->ofs,
+ frag->ofs + frag->size,
+ ref_offset(raw));
end = frag->ofs + frag->size;
break;
}
if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
- D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
- frag->ofs, frag->ofs+frag->size, jeb->offset));
+ jffs2_dbg(1, "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
+ frag->ofs,
+ frag->ofs + frag->size,
+ jeb->offset);
break;
}
- D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
- frag->ofs, frag->ofs+frag->size, jeb->offset));
+ jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
+ frag->ofs,
+ frag->ofs + frag->size,
+ jeb->offset);
end = frag->ofs + frag->size;
break;
}
}
- D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
- orig_start, orig_end, start, end));
+ jffs2_dbg(1, "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
+ orig_start, orig_end, start, end);
D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size));
BUG_ON(end < orig_end);
@@ -1256,7 +1306,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
if (IS_ERR(pg_ptr)) {
- printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg_ptr));
+ pr_warn("read_cache_page() returned error: %ld\n",
+ PTR_ERR(pg_ptr));
return PTR_ERR(pg_ptr);
}
@@ -1270,8 +1321,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
&alloclen, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
- printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
- sizeof(ri)+ JFFS2_MIN_DATA_LEN, ret);
+ pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
+ sizeof(ri) + JFFS2_MIN_DATA_LEN, ret);
break;
}
cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
@@ -1308,7 +1359,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
jffs2_free_comprbuf(comprbuf, writebuf);
if (IS_ERR(new_fn)) {
- printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
+ pr_warn("Error writing new dnode: %ld\n",
+ PTR_ERR(new_fn));
ret = PTR_ERR(new_fn);
break;
}
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index c082868910f2..4f47aa24b556 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 5e03233c2363..975a1f562c10 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
@@ -687,8 +689,8 @@ int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
if (!size)
return 0;
if (unlikely(size > jeb->free_size)) {
- printk(KERN_CRIT "Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n",
- size, jeb->free_size, jeb->wasted_size);
+ pr_crit("Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n",
+ size, jeb->free_size, jeb->wasted_size);
BUG();
}
/* REF_EMPTY_NODE is !obsolete, so that works OK */
@@ -726,8 +728,10 @@ static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
/* Last node in block. Use free_space */
if (unlikely(ref != jeb->last_node)) {
- printk(KERN_CRIT "ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n",
- ref, ref_offset(ref), jeb->last_node, jeb->last_node?ref_offset(jeb->last_node):0);
+ pr_crit("ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n",
+ ref, ref_offset(ref), jeb->last_node,
+ jeb->last_node ?
+ ref_offset(jeb->last_node) : 0);
BUG();
}
ref_end = jeb->offset + c->sector_size - jeb->free_size;
@@ -747,16 +751,20 @@ uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *je
if (!jeb)
jeb = &c->blocks[ref->flash_offset / c->sector_size];
- printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
- ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
- ret, ref->__totlen);
+ pr_crit("Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
+ ref, ref_offset(ref), ref_offset(ref) + ref->__totlen,
+ ret, ref->__totlen);
if (ref_next(ref)) {
- printk(KERN_CRIT "next %p (0x%08x-0x%08x)\n", ref_next(ref), ref_offset(ref_next(ref)),
- ref_offset(ref_next(ref))+ref->__totlen);
+ pr_crit("next %p (0x%08x-0x%08x)\n",
+ ref_next(ref), ref_offset(ref_next(ref)),
+ ref_offset(ref_next(ref)) + ref->__totlen);
} else
- printk(KERN_CRIT "No next ref. jeb->last_node is %p\n", jeb->last_node);
+ pr_crit("No next ref. jeb->last_node is %p\n",
+ jeb->last_node);
- printk(KERN_CRIT "jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", jeb->wasted_size, jeb->dirty_size, jeb->used_size, jeb->free_size);
+ pr_crit("jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n",
+ jeb->wasted_size, jeb->dirty_size, jeb->used_size,
+ jeb->free_size);
#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
__jffs2_dbg_dump_node_refs_nolock(c, jeb);
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 694aa5b03505..6784d1e7a7eb 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
@@ -46,10 +48,10 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
/* align it */
minsize = PAD(minsize);
- D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
+ jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
mutex_lock(&c->alloc_sem);
- D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
+ jffs2_dbg(1, "%s(): alloc sem got\n", __func__);
spin_lock(&c->erase_completion_lock);
@@ -73,11 +75,13 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
if (dirty < c->nospc_dirty_size) {
if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
- D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
+ jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
+ __func__);
break;
}
- D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
- dirty, c->unchecked_size, c->sector_size));
+ jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
+ dirty, c->unchecked_size,
+ c->sector_size);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
@@ -96,12 +100,13 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
if ( (avail / c->sector_size) <= blocksneeded) {
if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
- D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
+ jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
+ __func__);
break;
}
- D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
- avail, blocksneeded * c->sector_size));
+ jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
+ avail, blocksneeded * c->sector_size);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
return -ENOSPC;
@@ -109,9 +114,14 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
mutex_unlock(&c->alloc_sem);
- D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
- c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
- c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
+ jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
+ c->nr_free_blocks, c->nr_erasing_blocks,
+ c->free_size, c->dirty_size, c->wasted_size,
+ c->used_size, c->erasing_size, c->bad_size,
+ c->free_size + c->dirty_size +
+ c->wasted_size + c->used_size +
+ c->erasing_size + c->bad_size,
+ c->flash_size);
spin_unlock(&c->erase_completion_lock);
ret = jffs2_garbage_collect_pass(c);
@@ -124,7 +134,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&c->erase_wait, &wait);
- D1(printk(KERN_DEBUG "%s waiting for erase to complete\n", __func__));
+ jffs2_dbg(1, "%s waiting for erase to complete\n",
+ __func__);
spin_unlock(&c->erase_completion_lock);
schedule();
@@ -144,7 +155,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
if (ret) {
- D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
+ jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
}
}
spin_unlock(&c->erase_completion_lock);
@@ -161,13 +172,14 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
int ret = -EAGAIN;
minsize = PAD(minsize);
- D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
+ jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
spin_lock(&c->erase_completion_lock);
while(ret == -EAGAIN) {
ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
if (ret) {
- D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
+ jffs2_dbg(1, "%s(): looping, ret is %d\n",
+ __func__, ret);
}
}
spin_unlock(&c->erase_completion_lock);
@@ -184,8 +196,8 @@ static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblo
{
if (c->nextblock == NULL) {
- D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
- jeb->offset));
+ jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
+ __func__, jeb->offset);
return;
}
/* Check, if we have a dirty block now, or if it was dirty already */
@@ -195,17 +207,20 @@ static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblo
jeb->dirty_size += jeb->wasted_size;
jeb->wasted_size = 0;
if (VERYDIRTY(c, jeb->dirty_size)) {
- D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
- jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size,
+ jeb->used_size);
list_add_tail(&jeb->list, &c->very_dirty_list);
} else {
- D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
- jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size,
+ jeb->used_size);
list_add_tail(&jeb->list, &c->dirty_list);
}
} else {
- D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
- jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size,
+ jeb->used_size);
list_add_tail(&jeb->list, &c->clean_list);
}
c->nextblock = NULL;
@@ -230,13 +245,14 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
list_move_tail(&ejeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
jffs2_garbage_collect_trigger(c);
- D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
- ejeb->offset));
+ jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
+ __func__, ejeb->offset);
}
if (!c->nr_erasing_blocks &&
!list_empty(&c->erasable_pending_wbuf_list)) {
- D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
+ jffs2_dbg(1, "%s(): Flushing write buffer\n",
+ __func__);
/* c->nextblock is NULL, no update to c->nextblock allowed */
spin_unlock(&c->erase_completion_lock);
jffs2_flush_wbuf_pad(c);
@@ -248,9 +264,11 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
if (!c->nr_erasing_blocks) {
/* Ouch. We're in GC, or we wouldn't have got here.
And there's no space left. At all. */
- printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
- c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
- list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
+ pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
+ c->nr_erasing_blocks, c->nr_free_blocks,
+ list_empty(&c->erasable_list) ? "yes" : "no",
+ list_empty(&c->erasing_list) ? "yes" : "no",
+ list_empty(&c->erase_pending_list) ? "yes" : "no");
return -ENOSPC;
}
@@ -278,7 +296,8 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
c->wbuf_ofs = 0xffffffff;
#endif
- D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));
+ jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
+ __func__, c->nextblock->offset);
return 0;
}
@@ -345,7 +364,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
if (jffs2_wbuf_dirty(c)) {
spin_unlock(&c->erase_completion_lock);
- D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
+ jffs2_dbg(1, "%s(): Flushing write buffer\n",
+ __func__);
jffs2_flush_wbuf_pad(c);
spin_lock(&c->erase_completion_lock);
jeb = c->nextblock;
@@ -387,7 +407,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
jeb = c->nextblock;
if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
- printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
+ pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
+ jeb->offset, jeb->free_size);
goto restart;
}
}
@@ -408,8 +429,9 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
spin_lock(&c->erase_completion_lock);
}
- D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
- *len, jeb->offset + (c->sector_size - jeb->free_size)));
+ jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
+ __func__,
+ *len, jeb->offset + (c->sector_size - jeb->free_size));
return 0;
}
@@ -434,20 +456,22 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
jeb = &c->blocks[ofs / c->sector_size];
- D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
- ofs & ~3, ofs & 3, len));
+ jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
+ __func__, ofs & ~3, ofs & 3, len);
#if 1
/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
if c->nextblock is set. Note that wbuf.c will file obsolete nodes
even after refiling c->nextblock */
if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
&& (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
- printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
+ pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
+ ofs & ~3, ofs & 3);
if (c->nextblock)
- printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
+ pr_warn("nextblock 0x%08x", c->nextblock->offset);
else
- printk(KERN_WARNING "No nextblock");
- printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
+ pr_warn("No nextblock");
+ pr_cont(", expected at %08x\n",
+ jeb->offset + (c->sector_size - jeb->free_size));
return ERR_PTR(-EINVAL);
}
#endif
@@ -457,8 +481,9 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
- D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
- jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size,
+ jeb->used_size);
if (jffs2_wbuf_dirty(c)) {
/* Flush the last write in the block if it's outstanding */
spin_unlock(&c->erase_completion_lock);
@@ -480,7 +505,7 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
- D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
+ jffs2_dbg(1, "jffs2_complete_reservation()\n");
spin_lock(&c->erase_completion_lock);
jffs2_garbage_collect_trigger(c);
spin_unlock(&c->erase_completion_lock);
@@ -493,7 +518,7 @@ static inline int on_list(struct list_head *obj, struct list_head *head)
list_for_each(this, head) {
if (this == obj) {
- D1(printk("%p is on list at %p\n", obj, head));
+ jffs2_dbg(1, "%p is on list at %p\n", obj, head);
return 1;
}
@@ -511,16 +536,18 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
uint32_t freed_len;
if(unlikely(!ref)) {
- printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
+ pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
return;
}
if (ref_obsolete(ref)) {
- D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
+ jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
+ __func__, ref_offset(ref));
return;
}
blocknr = ref->flash_offset / c->sector_size;
if (blocknr >= c->nr_blocks) {
- printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
+ pr_notice("raw node at 0x%08x is off the end of device!\n",
+ ref->flash_offset);
BUG();
}
jeb = &c->blocks[blocknr];
@@ -542,27 +569,31 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
if (ref_flags(ref) == REF_UNCHECKED) {
D1(if (unlikely(jeb->unchecked_size < freed_len)) {
- printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
- freed_len, blocknr, ref->flash_offset, jeb->used_size);
+ pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
+ freed_len, blocknr,
+ ref->flash_offset, jeb->used_size);
BUG();
})
- D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
+ jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
+ ref_offset(ref), freed_len);
jeb->unchecked_size -= freed_len;
c->unchecked_size -= freed_len;
} else {
D1(if (unlikely(jeb->used_size < freed_len)) {
- printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
- freed_len, blocknr, ref->flash_offset, jeb->used_size);
+ pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
+ freed_len, blocknr,
+ ref->flash_offset, jeb->used_size);
BUG();
})
- D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
+ jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
+ ref_offset(ref), freed_len);
jeb->used_size -= freed_len;
c->used_size -= freed_len;
}
// Take care, that wasted size is taken into concern
if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
- D1(printk("Dirtying\n"));
+ jffs2_dbg(1, "Dirtying\n");
addedsize = freed_len;
jeb->dirty_size += freed_len;
c->dirty_size += freed_len;
@@ -570,12 +601,12 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
/* Convert wasted space to dirty, if not a bad block */
if (jeb->wasted_size) {
if (on_list(&jeb->list, &c->bad_used_list)) {
- D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
- jeb->offset));
+ jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
+ jeb->offset);
addedsize = 0; /* To fool the refiling code later */
} else {
- D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
- jeb->wasted_size, jeb->offset));
+ jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
+ jeb->wasted_size, jeb->offset);
addedsize += jeb->wasted_size;
jeb->dirty_size += jeb->wasted_size;
c->dirty_size += jeb->wasted_size;
@@ -584,7 +615,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
}
}
} else {
- D1(printk("Wasting\n"));
+ jffs2_dbg(1, "Wasting\n");
addedsize = 0;
jeb->wasted_size += freed_len;
c->wasted_size += freed_len;
@@ -606,50 +637,57 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
}
if (jeb == c->nextblock) {
- D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
+ jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
+ jeb->offset);
} else if (!jeb->used_size && !jeb->unchecked_size) {
if (jeb == c->gcblock) {
- D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
+ jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
+ jeb->offset);
c->gcblock = NULL;
} else {
- D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
+ jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
+ jeb->offset);
list_del(&jeb->list);
}
if (jffs2_wbuf_dirty(c)) {
- D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
+ jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
} else {
if (jiffies & 127) {
/* Most of the time, we just erase it immediately. Otherwise we
spend ages scanning it on mount, etc. */
- D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
+ jffs2_dbg(1, "...and adding to erase_pending_list\n");
list_add_tail(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
jffs2_garbage_collect_trigger(c);
} else {
/* Sometimes, however, we leave it elsewhere so it doesn't get
immediately reused, and we spread the load a bit. */
- D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
+ jffs2_dbg(1, "...and adding to erasable_list\n");
list_add_tail(&jeb->list, &c->erasable_list);
}
}
- D1(printk(KERN_DEBUG "Done OK\n"));
+ jffs2_dbg(1, "Done OK\n");
} else if (jeb == c->gcblock) {
- D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
+ jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
+ jeb->offset);
} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
- D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
+ jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
+ jeb->offset);
list_del(&jeb->list);
- D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
+ jffs2_dbg(1, "...and adding to dirty_list\n");
list_add_tail(&jeb->list, &c->dirty_list);
} else if (VERYDIRTY(c, jeb->dirty_size) &&
!VERYDIRTY(c, jeb->dirty_size - addedsize)) {
- D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
+ jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
+ jeb->offset);
list_del(&jeb->list);
- D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
+ jffs2_dbg(1, "...and adding to very_dirty_list\n");
list_add_tail(&jeb->list, &c->very_dirty_list);
} else {
- D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
- jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size,
+ jeb->used_size);
}
spin_unlock(&c->erase_completion_lock);
@@ -665,33 +703,40 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */
- D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
+ jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
+ ref_offset(ref));
ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
if (ret) {
- printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
+ pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
+ ref_offset(ref), ret);
goto out_erase_sem;
}
if (retlen != sizeof(n)) {
- printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
+ pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
+ ref_offset(ref), retlen);
goto out_erase_sem;
}
if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
- printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
+ pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
+ je32_to_cpu(n.totlen), freed_len);
goto out_erase_sem;
}
if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
- D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
+ jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
+ ref_offset(ref), je16_to_cpu(n.nodetype));
goto out_erase_sem;
}
/* XXX FIXME: This is ugly now */
n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
if (ret) {
- printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
+ pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
+ ref_offset(ref), ret);
goto out_erase_sem;
}
if (retlen != sizeof(n)) {
- printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
+ pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
+ ref_offset(ref), retlen);
goto out_erase_sem;
}
@@ -751,8 +796,8 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c)
return 1;
if (c->unchecked_size) {
- D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
- c->unchecked_size, c->checked_ino));
+ jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
+ c->unchecked_size, c->checked_ino);
return 1;
}
@@ -780,8 +825,9 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c)
}
}
- D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
- c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no"));
+ jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
+ __func__, c->nr_free_blocks, c->nr_erasing_blocks,
+ c->dirty_size, nr_very_dirty, ret ? "yes" : "no");
return ret;
}
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index ab65ee3ec858..1cd3aec9d9ae 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -76,7 +76,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
#define jffs2_write_nand_cleanmarker(c,jeb) (-EIO)
#define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf)
-#define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_flash_read(c, ofs, len, retlen, buf) (mtd_read((c)->mtd, ofs, len, retlen, buf))
#define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; })
#define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; })
#define jffs2_write_nand_badblock(c,jeb,bad_offset) (1)
@@ -108,8 +108,6 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
#define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH)
-#define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf))
-#define jffs2_flash_read_oob(c, ofs, len, retlen, buf) ((c)->mtd->read_oob((c)->mtd, ofs, len, retlen, buf))
#define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len)
/* wbuf.c */
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c
index 3f39be1b0455..0b042b1fc82f 100644
--- a/fs/jffs2/read.c
+++ b/fs/jffs2/read.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/crc32.h>
@@ -36,24 +38,25 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri);
if (ret) {
jffs2_free_raw_inode(ri);
- printk(KERN_WARNING "Error reading node from 0x%08x: %d\n", ref_offset(fd->raw), ret);
+ pr_warn("Error reading node from 0x%08x: %d\n",
+ ref_offset(fd->raw), ret);
return ret;
}
if (readlen != sizeof(*ri)) {
jffs2_free_raw_inode(ri);
- printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n",
- ref_offset(fd->raw), sizeof(*ri), readlen);
+ pr_warn("Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n",
+ ref_offset(fd->raw), sizeof(*ri), readlen);
return -EIO;
}
crc = crc32(0, ri, sizeof(*ri)-8);
- D1(printk(KERN_DEBUG "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n",
+ jffs2_dbg(1, "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n",
ref_offset(fd->raw), je32_to_cpu(ri->node_crc),
crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize),
- je32_to_cpu(ri->offset), buf));
+ je32_to_cpu(ri->offset), buf);
if (crc != je32_to_cpu(ri->node_crc)) {
- printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n",
- je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw));
+ pr_warn("Node CRC %08x != calculated CRC %08x for node at %08x\n",
+ je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw));
ret = -EIO;
goto out_ri;
}
@@ -66,8 +69,8 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
}
D1(if(ofs + len > je32_to_cpu(ri->dsize)) {
- printk(KERN_WARNING "jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n",
- len, ofs, je32_to_cpu(ri->dsize));
+ pr_warn("jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n",
+ len, ofs, je32_to_cpu(ri->dsize));
ret = -EINVAL;
goto out_ri;
});
@@ -107,8 +110,8 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
decomprbuf = readbuf;
}
- D2(printk(KERN_DEBUG "Read %d bytes to %p\n", je32_to_cpu(ri->csize),
- readbuf));
+ jffs2_dbg(2, "Read %d bytes to %p\n", je32_to_cpu(ri->csize),
+ readbuf);
ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri),
je32_to_cpu(ri->csize), &readlen, readbuf);
@@ -119,18 +122,19 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
crc = crc32(0, readbuf, je32_to_cpu(ri->csize));
if (crc != je32_to_cpu(ri->data_crc)) {
- printk(KERN_WARNING "Data CRC %08x != calculated CRC %08x for node at %08x\n",
- je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw));
+ pr_warn("Data CRC %08x != calculated CRC %08x for node at %08x\n",
+ je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw));
ret = -EIO;
goto out_decomprbuf;
}
- D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc));
+ jffs2_dbg(2, "Data CRC matches calculated CRC %08x\n", crc);
if (ri->compr != JFFS2_COMPR_NONE) {
- D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n",
- je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf));
+ jffs2_dbg(2, "Decompress %d bytes from %p to %d bytes at %p\n",
+ je32_to_cpu(ri->csize), readbuf,
+ je32_to_cpu(ri->dsize), decomprbuf);
ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize));
if (ret) {
- printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret);
+ pr_warn("Error: jffs2_decompress returned %d\n", ret);
goto out_decomprbuf;
}
}
@@ -157,8 +161,8 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
struct jffs2_node_frag *frag;
int ret;
- D1(printk(KERN_DEBUG "jffs2_read_inode_range: ino #%u, range 0x%08x-0x%08x\n",
- f->inocache->ino, offset, offset+len));
+ jffs2_dbg(1, "%s(): ino #%u, range 0x%08x-0x%08x\n",
+ __func__, f->inocache->ino, offset, offset + len);
frag = jffs2_lookup_node_frag(&f->fragtree, offset);
@@ -168,22 +172,27 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
* (or perhaps is before it, if we've been asked to read off the
* end of the file). */
while(offset < end) {
- D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end));
+ jffs2_dbg(2, "%s(): offset %d, end %d\n",
+ __func__, offset, end);
if (unlikely(!frag || frag->ofs > offset ||
frag->ofs + frag->size <= offset)) {
uint32_t holesize = end - offset;
if (frag && frag->ofs > offset) {
- D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
+ jffs2_dbg(1, "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n",
+ f->inocache->ino, frag->ofs, offset);
holesize = min(holesize, frag->ofs - offset);
}
- D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
+ jffs2_dbg(1, "Filling non-frag hole from %d-%d\n",
+ offset, offset + holesize);
memset(buf, 0, holesize);
buf += holesize;
offset += holesize;
continue;
} else if (unlikely(!frag->node)) {
uint32_t holeend = min(end, frag->ofs + frag->size);
- D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
+ jffs2_dbg(1, "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n",
+ offset, holeend, frag->ofs,
+ frag->ofs + frag->size);
memset(buf, 0, holeend - offset);
buf += holeend - offset;
offset = holeend;
@@ -195,20 +204,23 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
fragofs = offset - frag->ofs;
readlen = min(frag->size - fragofs, end - offset);
- D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n",
- frag->ofs+fragofs, frag->ofs+fragofs+readlen,
- ref_offset(frag->node->raw), ref_flags(frag->node->raw)));
+ jffs2_dbg(1, "Reading %d-%d from node at 0x%08x (%d)\n",
+ frag->ofs + fragofs,
+ frag->ofs + fragofs + readlen,
+ ref_offset(frag->node->raw),
+ ref_flags(frag->node->raw));
ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen);
- D2(printk(KERN_DEBUG "node read done\n"));
+ jffs2_dbg(2, "node read done\n");
if (ret) {
- D1(printk(KERN_DEBUG"jffs2_read_inode_range error %d\n",ret));
+ jffs2_dbg(1, "%s(): error %d\n",
+ __func__, ret);
memset(buf, 0, readlen);
return ret;
}
buf += readlen;
offset += readlen;
frag = frag_next(frag);
- D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
+ jffs2_dbg(2, "node read was OK. Looping\n");
}
}
return 0;
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 3093ac4fb24c..dc0437e84763 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index f99464833bb2..7654e87b0428 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -22,15 +24,15 @@
#define DEFAULT_EMPTY_SCAN_SIZE 256
-#define noisy_printk(noise, args...) do { \
- if (*(noise)) { \
- printk(KERN_NOTICE args); \
- (*(noise))--; \
- if (!(*(noise))) { \
- printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
- } \
- } \
-} while(0)
+#define noisy_printk(noise, fmt, ...) \
+do { \
+ if (*(noise)) { \
+ pr_notice(fmt, ##__VA_ARGS__); \
+ (*(noise))--; \
+ if (!(*(noise))) \
+ pr_notice("Further such events for this erase block will not be printed\n"); \
+ } \
+} while (0)
static uint32_t pseudo_random;
@@ -96,18 +98,17 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
#ifndef __ECOS
size_t pointlen, try_size;
- if (c->mtd->point) {
- ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
- (void **)&flashbuf, NULL);
- if (!ret && pointlen < c->mtd->size) {
- /* Don't muck about if it won't let us point to the whole flash */
- D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
- mtd_unpoint(c->mtd, 0, pointlen);
- flashbuf = NULL;
- }
- if (ret && ret != -EOPNOTSUPP)
- D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
+ ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
+ (void **)&flashbuf, NULL);
+ if (!ret && pointlen < c->mtd->size) {
+ /* Don't muck about if it won't let us point to the whole flash */
+ jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
+ pointlen);
+ mtd_unpoint(c->mtd, 0, pointlen);
+ flashbuf = NULL;
}
+ if (ret && ret != -EOPNOTSUPP)
+ jffs2_dbg(1, "MTD point failed %d\n", ret);
#endif
if (!flashbuf) {
/* For NAND it's quicker to read a whole eraseblock at a time,
@@ -117,15 +118,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
else
try_size = PAGE_SIZE;
- D1(printk(KERN_DEBUG "Trying to allocate readbuf of %zu "
- "bytes\n", try_size));
+ jffs2_dbg(1, "Trying to allocate readbuf of %zu "
+ "bytes\n", try_size);
flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
if (!flashbuf)
return -ENOMEM;
- D1(printk(KERN_DEBUG "Allocated readbuf of %zu bytes\n",
- try_size));
+ jffs2_dbg(1, "Allocated readbuf of %zu bytes\n",
+ try_size);
buf_size = (uint32_t)try_size;
}
@@ -178,7 +179,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
c->nr_free_blocks++;
} else {
/* Dirt */
- D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
+ jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n",
+ jeb->offset);
list_add(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
}
@@ -205,7 +207,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
}
/* update collected summary information for the current nextblock */
jffs2_sum_move_collected(c, s);
- D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset));
+ jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
+ __func__, jeb->offset);
c->nextblock = jeb;
} else {
ret = file_dirty(c, jeb);
@@ -217,20 +220,21 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
case BLK_STATE_ALLDIRTY:
/* Nothing valid - not even a clean marker. Needs erasing. */
/* For now we just put it on the erasing list. We'll start the erases later */
- D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
+ jffs2_dbg(1, "Erase block at 0x%08x is not formatted. It will be erased\n",
+ jeb->offset);
list_add(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
break;
case BLK_STATE_BADBLOCK:
- D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
+ jffs2_dbg(1, "Block at 0x%08x is bad\n", jeb->offset);
list_add(&jeb->list, &c->bad_list);
c->bad_size += c->sector_size;
c->free_size -= c->sector_size;
bad_blocks++;
break;
default:
- printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
+ pr_warn("%s(): unknown block state\n", __func__);
BUG();
}
}
@@ -250,16 +254,17 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;
- D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
- skip));
+ jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n",
+ __func__, skip);
jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
jffs2_scan_dirty_space(c, c->nextblock, skip);
}
#endif
if (c->nr_erasing_blocks) {
if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
- printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
- printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
+ pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
+ pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",
+ empty_blocks, bad_blocks, c->nr_blocks);
ret = -EIO;
goto out;
}
@@ -287,11 +292,13 @@ static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
if (ret) {
- D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret));
+ jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n",
+ len, ofs, ret);
return ret;
}
if (retlen < len) {
- D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
+ jffs2_dbg(1, "Read at 0x%x gave only 0x%zx bytes\n",
+ ofs, retlen);
return -EIO;
}
return 0;
@@ -368,7 +375,7 @@ static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
if (jffs2_sum_active())
jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
- dbg_xattr("scaning xdatum at %#08x (xid=%u, version=%u)\n",
+ dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n",
ofs, xd->xid, xd->version);
return 0;
}
@@ -449,7 +456,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
ofs = jeb->offset;
prevofs = jeb->offset - 1;
- D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));
+ jffs2_dbg(1, "%s(): Scanning block at 0x%x\n", __func__, ofs);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
if (jffs2_cleanmarker_oob(c)) {
@@ -459,7 +466,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
return BLK_STATE_BADBLOCK;
ret = jffs2_check_nand_cleanmarker(c, jeb);
- D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n",ret));
+ jffs2_dbg(2, "jffs_check_nand_cleanmarker returned %d\n", ret);
/* Even if it's not found, we still scan to see
if the block is empty. We use this information
@@ -561,7 +568,8 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
if (jffs2_cleanmarker_oob(c)) {
/* scan oob, take care of cleanmarker */
int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
- D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
+ jffs2_dbg(2, "jffs2_check_oob_empty returned %d\n",
+ ret);
switch (ret) {
case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
case 1: return BLK_STATE_ALLDIRTY;
@@ -569,15 +577,16 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
}
}
#endif
- D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
+ jffs2_dbg(1, "Block at 0x%08x is empty (erased)\n",
+ jeb->offset);
if (c->cleanmarker_size == 0)
return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */
else
return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */
}
if (ofs) {
- D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
- jeb->offset + ofs));
+ jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset,
+ jeb->offset + ofs);
if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
return err;
if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
@@ -604,12 +613,13 @@ scan_more:
cond_resched();
if (ofs & 3) {
- printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
+ pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs);
ofs = PAD(ofs);
continue;
}
if (ofs == prevofs) {
- printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
+ pr_warn("ofs 0x%08x has already been seen. Skipping\n",
+ ofs);
if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
return err;
ofs += 4;
@@ -618,8 +628,10 @@ scan_more:
prevofs = ofs;
if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
- D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
- jeb->offset, c->sector_size, ofs, sizeof(*node)));
+ jffs2_dbg(1, "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n",
+ sizeof(struct jffs2_unknown_node),
+ jeb->offset, c->sector_size, ofs,
+ sizeof(*node));
if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
return err;
break;
@@ -627,8 +639,9 @@ scan_more:
if (buf_ofs + buf_len < ofs + sizeof(*node)) {
buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
- D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
- sizeof(struct jffs2_unknown_node), buf_len, ofs));
+ jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
+ sizeof(struct jffs2_unknown_node),
+ buf_len, ofs);
err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
if (err)
return err;
@@ -645,13 +658,13 @@ scan_more:
ofs += 4;
scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len);
- D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
+ jffs2_dbg(1, "Found empty flash at 0x%08x\n", ofs);
more_empty:
inbuf_ofs = ofs - buf_ofs;
while (inbuf_ofs < scan_end) {
if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
- printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
- empty_start, ofs);
+ pr_warn("Empty flash at 0x%08x ends at 0x%08x\n",
+ empty_start, ofs);
if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
return err;
goto scan_more;
@@ -661,13 +674,15 @@ scan_more:
ofs += 4;
}
/* Ran off end. */
- D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));
+ jffs2_dbg(1, "Empty flash to end of buffer at 0x%08x\n",
+ ofs);
/* If we're only checking the beginning of a block with a cleanmarker,
bail now */
if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
- D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
+ jffs2_dbg(1, "%d bytes at start of block seems clean... assuming all clean\n",
+ EMPTY_SCAN_SIZE(c->sector_size));
return BLK_STATE_CLEANMARKER;
}
if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */
@@ -680,13 +695,14 @@ scan_more:
if (!buf_len) {
/* No more to read. Break out of main loop without marking
this range of empty space as dirty (because it's not) */
- D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
- empty_start));
+ jffs2_dbg(1, "Empty flash at %08x runs to end of block. Treating as free_space\n",
+ empty_start);
break;
}
/* point never reaches here */
scan_end = buf_len;
- D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
+ jffs2_dbg(1, "Reading another 0x%x at 0x%08x\n",
+ buf_len, ofs);
err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
if (err)
return err;
@@ -695,22 +711,23 @@ scan_more:
}
if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
- printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
+ pr_warn("Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n",
+ ofs);
if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
return err;
ofs += 4;
continue;
}
if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
- D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
+ jffs2_dbg(1, "Dirty bitmask at 0x%08x\n", ofs);
if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
return err;
ofs += 4;
continue;
}
if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
- printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
- printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
+ pr_warn("Old JFFS2 bitmask found at 0x%08x\n", ofs);
+ pr_warn("You cannot use older JFFS2 filesystems with newer kernels\n");
if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
return err;
ofs += 4;
@@ -718,7 +735,8 @@ scan_more:
}
if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
/* OK. We're out of possibilities. Whinge and move on */
- noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
+ noisy_printk(&noise, "%s(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
+ __func__,
JFFS2_MAGIC_BITMASK, ofs,
je16_to_cpu(node->magic));
if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
@@ -733,7 +751,8 @@ scan_more:
hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);
if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
- noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
+ noisy_printk(&noise, "%s(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
+ __func__,
ofs, je16_to_cpu(node->magic),
je16_to_cpu(node->nodetype),
je32_to_cpu(node->totlen),
@@ -747,9 +766,9 @@ scan_more:
if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
/* Eep. Node goes over the end of the erase block. */
- printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
- ofs, je32_to_cpu(node->totlen));
- printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
+ pr_warn("Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
+ ofs, je32_to_cpu(node->totlen));
+ pr_warn("Perhaps the file system was created with the wrong erase size?\n");
if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
return err;
ofs += 4;
@@ -758,7 +777,8 @@ scan_more:
if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
/* Wheee. This is an obsoleted node */
- D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
+ jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n",
+ ofs);
if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
return err;
ofs += PAD(je32_to_cpu(node->totlen));
@@ -769,8 +789,9 @@ scan_more:
case JFFS2_NODETYPE_INODE:
if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
- D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
- sizeof(struct jffs2_raw_inode), buf_len, ofs));
+ jffs2_dbg(1, "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
+ sizeof(struct jffs2_raw_inode),
+ buf_len, ofs);
err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
if (err)
return err;
@@ -785,8 +806,9 @@ scan_more:
case JFFS2_NODETYPE_DIRENT:
if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
- D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
- je32_to_cpu(node->totlen), buf_len, ofs));
+ jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
+ je32_to_cpu(node->totlen), buf_len,
+ ofs);
err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
if (err)
return err;
@@ -802,9 +824,9 @@ scan_more:
case JFFS2_NODETYPE_XATTR:
if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
- D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)"
- " left to end of buf. Reading 0x%x at 0x%08x\n",
- je32_to_cpu(node->totlen), buf_len, ofs));
+ jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n",
+ je32_to_cpu(node->totlen), buf_len,
+ ofs);
err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
if (err)
return err;
@@ -819,9 +841,9 @@ scan_more:
case JFFS2_NODETYPE_XREF:
if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
- D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)"
- " left to end of buf. Reading 0x%x at 0x%08x\n",
- je32_to_cpu(node->totlen), buf_len, ofs));
+ jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. Reading 0x%x at 0x%08x\n",
+ je32_to_cpu(node->totlen), buf_len,
+ ofs);
err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
if (err)
return err;
@@ -836,15 +858,17 @@ scan_more:
#endif /* CONFIG_JFFS2_FS_XATTR */
case JFFS2_NODETYPE_CLEANMARKER:
- D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
+ jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs);
if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
- printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
- ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
+ pr_notice("CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
+ ofs, je32_to_cpu(node->totlen),
+ c->cleanmarker_size);
if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
return err;
ofs += PAD(sizeof(struct jffs2_unknown_node));
} else if (jeb->first_node) {
- printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
+ pr_notice("CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n",
+ ofs, jeb->offset);
if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
return err;
ofs += PAD(sizeof(struct jffs2_unknown_node));
@@ -866,7 +890,8 @@ scan_more:
default:
switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
case JFFS2_FEATURE_ROCOMPAT:
- printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
+ pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n",
+ je16_to_cpu(node->nodetype), ofs);
c->flags |= JFFS2_SB_FLAG_RO;
if (!(jffs2_is_readonly(c)))
return -EROFS;
@@ -876,18 +901,21 @@ scan_more:
break;
case JFFS2_FEATURE_INCOMPAT:
- printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
+ pr_notice("Incompatible feature node (0x%04x) found at offset 0x%08x\n",
+ je16_to_cpu(node->nodetype), ofs);
return -EINVAL;
case JFFS2_FEATURE_RWCOMPAT_DELETE:
- D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
+ jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
+ je16_to_cpu(node->nodetype), ofs);
if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
return err;
ofs += PAD(je32_to_cpu(node->totlen));
break;
case JFFS2_FEATURE_RWCOMPAT_COPY: {
- D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
+ jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
+ je16_to_cpu(node->nodetype), ofs);
jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);
@@ -908,8 +936,9 @@ scan_more:
}
}
- D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
- jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size));
+ jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size,
+ jeb->unchecked_size, jeb->used_size, jeb->wasted_size);
/* mark_node_obsolete can add to wasted !! */
if (jeb->wasted_size) {
@@ -935,7 +964,7 @@ struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uin
ic = jffs2_alloc_inode_cache();
if (!ic) {
- printk(KERN_NOTICE "jffs2_scan_make_inode_cache(): allocation of inode cache failed\n");
+ pr_notice("%s(): allocation of inode cache failed\n", __func__);
return NULL;
}
memset(ic, 0, sizeof(*ic));
@@ -954,7 +983,7 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
struct jffs2_inode_cache *ic;
uint32_t crc, ino = je32_to_cpu(ri->ino);
- D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));
+ jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
/* We do very little here now. Just check the ino# to which we should attribute
this node; we can do all the CRC checking etc. later. There's a tradeoff here --
@@ -968,9 +997,8 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
/* Check the node CRC in any case. */
crc = crc32(0, ri, sizeof(*ri)-8);
if (crc != je32_to_cpu(ri->node_crc)) {
- printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on "
- "node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- ofs, je32_to_cpu(ri->node_crc), crc);
+ pr_notice("%s(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ __func__, ofs, je32_to_cpu(ri->node_crc), crc);
/*
* We believe totlen because the CRC on the node
* _header_ was OK, just the node itself failed.
@@ -989,10 +1017,10 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
/* Wheee. It worked */
jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);
- D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
+ jffs2_dbg(1, "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
je32_to_cpu(ri->offset),
- je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));
+ je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize));
pseudo_random += je32_to_cpu(ri->version);
@@ -1012,15 +1040,15 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
uint32_t crc;
int err;
- D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));
+ jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
/* We don't get here unless the node is still valid, so we don't have to
mask in the ACCURATE bit any more. */
crc = crc32(0, rd, sizeof(*rd)-8);
if (crc != je32_to_cpu(rd->node_crc)) {
- printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- ofs, je32_to_cpu(rd->node_crc), crc);
+ pr_notice("%s(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ __func__, ofs, je32_to_cpu(rd->node_crc), crc);
/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
return err;
@@ -1032,7 +1060,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
/* Should never happen. Did. (OLPC trac #4184)*/
checkedlen = strnlen(rd->name, rd->nsize);
if (checkedlen < rd->nsize) {
- printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n",
+ pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n",
ofs, checkedlen);
}
fd = jffs2_alloc_full_dirent(checkedlen+1);
@@ -1044,9 +1072,10 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
crc = crc32(0, fd->name, rd->nsize);
if (crc != je32_to_cpu(rd->name_crc)) {
- printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- ofs, je32_to_cpu(rd->name_crc), crc);
- D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
+ pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ __func__, ofs, je32_to_cpu(rd->name_crc), crc);
+ jffs2_dbg(1, "Name for which CRC failed is (now) '%s', ino #%d\n",
+ fd->name, je32_to_cpu(rd->ino));
jffs2_free_full_dirent(fd);
/* FIXME: Why do we believe totlen? */
/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index 0f20208df602..aca97f35b292 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -23,8 +23,8 @@
#include "nodelist.h"
/* ---- Initial Security Label(s) Attachment callback --- */
-int jffs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
- void *fs_info)
+static int jffs2_initxattrs(struct inode *inode,
+ const struct xattr *xattr_array, void *fs_info)
{
const struct xattr *xattr;
int err = 0;
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index e537fb0e0184..c522d098bb4f 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -11,6 +11,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
@@ -442,13 +444,16 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
/* This should never happen, but https://dev.laptop.org/ticket/4184 */
checkedlen = strnlen(spd->name, spd->nsize);
if (!checkedlen) {
- printk(KERN_ERR "Dirent at %08x has zero at start of name. Aborting mount.\n",
- jeb->offset + je32_to_cpu(spd->offset));
+ pr_err("Dirent at %08x has zero at start of name. Aborting mount.\n",
+ jeb->offset +
+ je32_to_cpu(spd->offset));
return -EIO;
}
if (checkedlen < spd->nsize) {
- printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n",
- jeb->offset + je32_to_cpu(spd->offset), checkedlen);
+ pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n",
+ jeb->offset +
+ je32_to_cpu(spd->offset),
+ checkedlen);
}
@@ -808,8 +813,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
sum_ofs = jeb->offset + c->sector_size - jeb->free_size;
- dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n",
- sum_ofs);
+ dbg_summary("writing out data to flash to pos : 0x%08x\n", sum_ofs);
ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index f2d96b5e64f6..f9916f312bd8 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -69,7 +71,7 @@ static void jffs2_write_super(struct super_block *sb)
sb->s_dirt = 0;
if (!(sb->s_flags & MS_RDONLY)) {
- D1(printk(KERN_DEBUG "jffs2_write_super()\n"));
+ jffs2_dbg(1, "%s()\n", __func__);
jffs2_flush_wbuf_gc(c, 0);
}
@@ -214,8 +216,8 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
JFFS2_COMPR_MODE_FORCEZLIB;
#endif
else {
- printk(KERN_ERR "JFFS2 Error: unknown compressor \"%s\"",
- name);
+ pr_err("Error: unknown compressor \"%s\"\n",
+ name);
kfree(name);
return -EINVAL;
}
@@ -223,8 +225,8 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
c->mount_opts.override_compr = true;
break;
default:
- printk(KERN_ERR "JFFS2 Error: unrecognized mount option '%s' or missing value\n",
- p);
+ pr_err("Error: unrecognized mount option '%s' or missing value\n",
+ p);
return -EINVAL;
}
}
@@ -266,9 +268,9 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
struct jffs2_sb_info *c;
int ret;
- D1(printk(KERN_DEBUG "jffs2_get_sb_mtd():"
+ jffs2_dbg(1, "jffs2_get_sb_mtd():"
" New superblock for device %d (\"%s\")\n",
- sb->s_mtd->index, sb->s_mtd->name));
+ sb->s_mtd->index, sb->s_mtd->name);
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -315,7 +317,7 @@ static void jffs2_put_super (struct super_block *sb)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
- D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
+ jffs2_dbg(2, "%s()\n", __func__);
if (sb->s_dirt)
jffs2_write_super(sb);
@@ -336,7 +338,7 @@ static void jffs2_put_super (struct super_block *sb)
kfree(c->inocache_list);
jffs2_clear_xattr_subsystem(c);
mtd_sync(c->mtd);
- D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
+ jffs2_dbg(1, "%s(): returning\n", __func__);
}
static void jffs2_kill_sb(struct super_block *sb)
@@ -371,7 +373,7 @@ static int __init init_jffs2_fs(void)
BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
- printk(KERN_INFO "JFFS2 version 2.2."
+ pr_info("version 2.2."
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
" (NAND)"
#endif
@@ -386,22 +388,22 @@ static int __init init_jffs2_fs(void)
SLAB_MEM_SPREAD),
jffs2_i_init_once);
if (!jffs2_inode_cachep) {
- printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n");
+ pr_err("error: Failed to initialise inode cache\n");
return -ENOMEM;
}
ret = jffs2_compressors_init();
if (ret) {
- printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n");
+ pr_err("error: Failed to initialise compressors\n");
goto out;
}
ret = jffs2_create_slab_caches();
if (ret) {
- printk(KERN_ERR "JFFS2 error: Failed to initialise slab caches\n");
+ pr_err("error: Failed to initialise slab caches\n");
goto out_compressors;
}
ret = register_filesystem(&jffs2_fs_type);
if (ret) {
- printk(KERN_ERR "JFFS2 error: Failed to register filesystem\n");
+ pr_err("error: Failed to register filesystem\n");
goto out_slab;
}
return 0;
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c
index e3035afb1814..6e563332bb24 100644
--- a/fs/jffs2/symlink.c
+++ b/fs/jffs2/symlink.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/namei.h>
@@ -47,10 +49,11 @@ static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
*/
if (!p) {
- printk(KERN_ERR "jffs2_follow_link(): can't find symlink target\n");
+ pr_err("%s(): can't find symlink target\n", __func__);
p = ERR_PTR(-EIO);
}
- D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->target));
+ jffs2_dbg(1, "%s(): target path is '%s'\n",
+ __func__, (char *)f->target);
nd_set_link(nd, p);
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 30e8f47e8a23..74d9be19df3f 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -11,6 +11,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
@@ -91,7 +93,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
new = kmalloc(sizeof(*new), GFP_KERNEL);
if (!new) {
- D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
+ jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
jffs2_clear_wbuf_ino_list(c);
c->wbuf_inodes = &inodirty_nomem;
return;
@@ -113,19 +115,20 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
+ jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
+ jeb->offset);
list_del(this);
if ((jiffies + (n++)) & 127) {
/* Most of the time, we just erase it immediately. Otherwise we
spend ages scanning it on mount, etc. */
- D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
+ jffs2_dbg(1, "...and adding to erase_pending_list\n");
list_add_tail(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
jffs2_garbage_collect_trigger(c);
} else {
/* Sometimes, however, we leave it elsewhere so it doesn't get
immediately reused, and we spread the load a bit. */
- D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
+ jffs2_dbg(1, "...and adding to erasable_list\n");
list_add_tail(&jeb->list, &c->erasable_list);
}
}
@@ -136,7 +139,7 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
- D1(printk("About to refile bad block at %08x\n", jeb->offset));
+ jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);
/* File the existing block on the bad_used_list.... */
if (c->nextblock == jeb)
@@ -144,12 +147,14 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
else /* Not sure this should ever happen... need more coffee */
list_del(&jeb->list);
if (jeb->first_node) {
- D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
+ jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
+ jeb->offset);
list_add(&jeb->list, &c->bad_used_list);
} else {
BUG_ON(allow_empty == REFILE_NOTEMPTY);
/* It has to have had some nodes or we couldn't be here */
- D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
+ jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
+ jeb->offset);
list_add(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
jffs2_garbage_collect_trigger(c);
@@ -230,10 +235,12 @@ static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
- printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x failed: %d\n", c->wbuf_ofs, ret);
+ pr_warn("%s(): Read back of page at %08x failed: %d\n",
+ __func__, c->wbuf_ofs, ret);
return ret;
} else if (retlen != c->wbuf_pagesize) {
- printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x gave short read: %zd not %d.\n", ofs, retlen, c->wbuf_pagesize);
+ pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
+ __func__, ofs, retlen, c->wbuf_pagesize);
return -EIO;
}
if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
@@ -246,12 +253,12 @@ static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
else
eccstr = "OK or unused";
- printk(KERN_WARNING "Write verify error (ECC %s) at %08x. Wrote:\n",
- eccstr, c->wbuf_ofs);
+ pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
+ eccstr, c->wbuf_ofs);
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
c->wbuf, c->wbuf_pagesize, 0);
- printk(KERN_WARNING "Read back:\n");
+ pr_warn("Read back:\n");
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
c->wbuf_verify, c->wbuf_pagesize, 0);
@@ -308,7 +315,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
if (!first_raw) {
/* All nodes were obsolete. Nothing to recover. */
- D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
+ jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
c->wbuf_len = 0;
return;
}
@@ -331,7 +338,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
buf = kmalloc(end - start, GFP_KERNEL);
if (!buf) {
- printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
+ pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");
goto read_failed;
}
@@ -346,7 +353,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
ret = 0;
if (ret || retlen != c->wbuf_ofs - start) {
- printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
+ pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");
kfree(buf);
buf = NULL;
@@ -380,7 +387,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
/* ... and get an allocation of space from a shiny new block instead */
ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
if (ret) {
- printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
+ pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
kfree(buf);
return;
}
@@ -390,7 +397,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
if (ret) {
- printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
+ pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
kfree(buf);
return;
}
@@ -406,13 +413,13 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
unsigned char *rewrite_buf = buf?:c->wbuf;
uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
- D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
- towrite, ofs));
+ jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
+ towrite, ofs);
#ifdef BREAKMEHEADER
static int breakme;
if (breakme++ == 20) {
- printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
+ pr_notice("Faking write error at 0x%08x\n", ofs);
breakme = 0;
mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
ret = -EIO;
@@ -423,7 +430,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
/* Argh. We tried. Really we did. */
- printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
+ pr_crit("Recovery of wbuf failed due to a second write error\n");
kfree(buf);
if (retlen)
@@ -431,7 +438,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
return;
}
- printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
+ pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);
c->wbuf_len = (end - start) - towrite;
c->wbuf_ofs = ofs + towrite;
@@ -459,8 +466,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
struct jffs2_raw_node_ref **adjust_ref = NULL;
struct jffs2_inode_info *f = NULL;
- D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
- rawlen, ref_offset(raw), ref_flags(raw), ofs));
+ jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
+ rawlen, ref_offset(raw), ref_flags(raw), ofs);
ic = jffs2_raw_ref_to_ic(raw);
@@ -540,7 +547,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
/* Fix up the original jeb now it's on the bad_list */
if (first_raw == jeb->first_node) {
- D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
+ jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
+ jeb->offset);
list_move(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
jffs2_garbage_collect_trigger(c);
@@ -554,7 +562,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
spin_unlock(&c->erase_completion_lock);
- D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));
+ jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
+ c->wbuf_ofs, c->wbuf_len);
}
@@ -579,7 +588,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
return 0;
if (!mutex_is_locked(&c->alloc_sem)) {
- printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
+ pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
BUG();
}
@@ -617,7 +626,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
#ifdef BREAKME
static int breakme;
if (breakme++ == 20) {
- printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
+ pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
breakme = 0;
mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
brokenbuf);
@@ -629,11 +638,11 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
&retlen, c->wbuf);
if (ret) {
- printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret);
+ pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
goto wfail;
} else if (retlen != c->wbuf_pagesize) {
- printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
- retlen, c->wbuf_pagesize);
+ pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
+ retlen, c->wbuf_pagesize);
ret = -EIO;
goto wfail;
} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
@@ -647,17 +656,18 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
if (pad) {
uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
- D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
- (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));
+ jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
+ (wbuf_jeb == c->nextblock) ? "next" : "",
+ wbuf_jeb->offset);
/* wbuf_pagesize - wbuf_len is the amount of space that's to be
padded. If there is less free space in the block than that,
something screwed up */
if (wbuf_jeb->free_size < waste) {
- printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
- c->wbuf_ofs, c->wbuf_len, waste);
- printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
- wbuf_jeb->offset, wbuf_jeb->free_size);
+ pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
+ c->wbuf_ofs, c->wbuf_len, waste);
+ pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
+ wbuf_jeb->offset, wbuf_jeb->free_size);
BUG();
}
@@ -694,14 +704,14 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
uint32_t old_wbuf_len;
int ret = 0;
- D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
+ jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);
if (!c->wbuf)
return 0;
mutex_lock(&c->alloc_sem);
if (!jffs2_wbuf_pending_for_ino(c, ino)) {
- D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
+ jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
mutex_unlock(&c->alloc_sem);
return 0;
}
@@ -711,7 +721,8 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
if (c->unchecked_size) {
/* GC won't make any progress for a while */
- D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
+ jffs2_dbg(1, "%s(): padding. Not finished checking\n",
+ __func__);
down_write(&c->wbuf_sem);
ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
/* retry flushing wbuf in case jffs2_wbuf_recover
@@ -724,7 +735,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
mutex_unlock(&c->alloc_sem);
- D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
+ jffs2_dbg(1, "%s(): calls gc pass\n", __func__);
ret = jffs2_garbage_collect_pass(c);
if (ret) {
@@ -742,7 +753,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
mutex_lock(&c->alloc_sem);
}
- D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
+ jffs2_dbg(1, "%s(): ends...\n", __func__);
mutex_unlock(&c->alloc_sem);
return ret;
@@ -811,9 +822,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
/* It's a write to a new block */
if (c->wbuf_len) {
- D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
- "causes flush of wbuf at 0x%08x\n",
- (unsigned long)to, c->wbuf_ofs));
+ jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
+ __func__, (unsigned long)to, c->wbuf_ofs);
ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
if (ret)
goto outerr;
@@ -825,11 +835,11 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
/* We're not writing immediately after the writebuffer. Bad. */
- printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
- "to %08lx\n", (unsigned long)to);
+ pr_crit("%s(): Non-contiguous write to %08lx\n",
+ __func__, (unsigned long)to);
if (c->wbuf_len)
- printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
- c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
+ pr_crit("wbuf was previously %08x-%08x\n",
+ c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
BUG();
}
@@ -957,8 +967,8 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re
if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
if (ret == -EBADMSG)
- printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)"
- " returned ECC error\n", len, ofs);
+ pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
+ len, ofs);
/*
* We have the raw data without ECC correction in the buffer,
* maybe we are lucky and all data or parts are correct. We
@@ -1034,9 +1044,8 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
if (ret || ops.oobretlen != ops.ooblen) {
- printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
- " bytes, read %zd bytes, error %d\n",
- jeb->offset, ops.ooblen, ops.oobretlen, ret);
+ pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
+ jeb->offset, ops.ooblen, ops.oobretlen, ret);
if (!ret)
ret = -EIO;
return ret;
@@ -1048,8 +1057,8 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
continue;
if (ops.oobbuf[i] != 0xFF) {
- D2(printk(KERN_DEBUG "Found %02x at %x in OOB for "
- "%08x\n", ops.oobbuf[i], i, jeb->offset));
+ jffs2_dbg(2, "Found %02x at %x in OOB for "
+ "%08x\n", ops.oobbuf[i], i, jeb->offset);
return 1;
}
}
@@ -1077,9 +1086,8 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
if (ret || ops.oobretlen != ops.ooblen) {
- printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
- " bytes, read %zd bytes, error %d\n",
- jeb->offset, ops.ooblen, ops.oobretlen, ret);
+ pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
+ jeb->offset, ops.ooblen, ops.oobretlen, ret);
if (!ret)
ret = -EIO;
return ret;
@@ -1103,9 +1111,8 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
if (ret || ops.oobretlen != ops.ooblen) {
- printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd"
- " bytes, read %zd bytes, error %d\n",
- jeb->offset, ops.ooblen, ops.oobretlen, ret);
+ pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
+ jeb->offset, ops.ooblen, ops.oobretlen, ret);
if (!ret)
ret = -EIO;
return ret;
@@ -1130,11 +1137,12 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
if( ++jeb->bad_count < MAX_ERASE_FAILURES)
return 0;
- printk(KERN_WARNING "JFFS2: marking eraseblock at %08x\n as bad", bad_offset);
+ pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
ret = mtd_block_markbad(c->mtd, bad_offset);
if (ret) {
- D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
+ jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
+ __func__, jeb->offset, ret);
return ret;
}
return 1;
@@ -1151,11 +1159,11 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
c->cleanmarker_size = 0;
if (!oinfo || oinfo->oobavail == 0) {
- printk(KERN_ERR "inconsistent device description\n");
+ pr_err("inconsistent device description\n");
return -EINVAL;
}
- D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n"));
+ jffs2_dbg(1, "using OOB on NAND\n");
c->oobavail = oinfo->oobavail;
@@ -1222,7 +1230,7 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
if ((c->flash_size % c->sector_size) != 0) {
c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
- printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
+ pr_warn("flash size adjusted to %dKiB\n", c->flash_size);
};
c->wbuf_ofs = 0xFFFFFFFF;
@@ -1239,7 +1247,8 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
}
#endif
- printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
+ pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
+ c->wbuf_pagesize, c->sector_size);
return 0;
}
@@ -1297,7 +1306,8 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
if (!c->wbuf)
return -ENOMEM;
- printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
+ pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
+ c->wbuf_pagesize, c->sector_size);
return 0;
}
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index 30d175b6d290..b634de4c8101 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/crc32.h>
@@ -36,7 +38,7 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
f->inocache->state = INO_STATE_PRESENT;
jffs2_add_ino_cache(c, f->inocache);
- D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino));
+ jffs2_dbg(1, "%s(): Assigned ino# %d\n", __func__, f->inocache->ino);
ri->ino = cpu_to_je32(f->inocache->ino);
ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -68,7 +70,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
unsigned long cnt = 2;
D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) {
- printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dnode()\n");
+ pr_crit("Eep. CRC not correct in jffs2_write_dnode()\n");
BUG();
}
);
@@ -78,7 +80,9 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
vecs[1].iov_len = datalen;
if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) {
- printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen);
+ pr_warn("%s(): ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n",
+ __func__, je32_to_cpu(ri->totlen),
+ sizeof(*ri), datalen);
}
fn = jffs2_alloc_full_dnode();
@@ -95,9 +99,9 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) {
BUG_ON(!retried);
- D1(printk(KERN_DEBUG "jffs2_write_dnode : dnode_version %d, "
- "highest version %d -> updating dnode\n",
- je32_to_cpu(ri->version), f->highest_version));
+ jffs2_dbg(1, "%s(): dnode_version %d, highest version %d -> updating dnode\n",
+ __func__,
+ je32_to_cpu(ri->version), f->highest_version);
ri->version = cpu_to_je32(++f->highest_version);
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
}
@@ -106,8 +110,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
(alloc_mode==ALLOC_GC)?0:f->inocache->ino);
if (ret || (retlen != sizeof(*ri) + datalen)) {
- printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
- sizeof(*ri)+datalen, flash_ofs, ret, retlen);
+ pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
+ sizeof(*ri) + datalen, flash_ofs, ret, retlen);
/* Mark the space as dirtied */
if (retlen) {
@@ -118,7 +122,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
this node */
jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL);
} else {
- printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs);
+ pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
+ flash_ofs);
}
if (!retried && alloc_mode != ALLOC_NORETRY) {
/* Try to reallocate space and retry */
@@ -127,7 +132,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
retried = 1;
- D1(printk(KERN_DEBUG "Retrying failed write.\n"));
+ jffs2_dbg(1, "Retrying failed write.\n");
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
@@ -147,14 +152,16 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
if (!ret) {
flash_ofs = write_ofs(c);
- D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
+ jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n",
+ flash_ofs);
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
goto retry;
}
- D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+ jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
+ ret);
}
/* Release the full_dnode which is now useless, and return */
jffs2_free_full_dnode(fn);
@@ -183,10 +190,10 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
fn->size = je32_to_cpu(ri->dsize);
fn->frags = 0;
- D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
+ jffs2_dbg(1, "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize),
je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc),
- je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)));
+ je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen));
if (retried) {
jffs2_dbg_acct_sanity_check(c,NULL);
@@ -206,22 +213,23 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
int retried = 0;
int ret;
- D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n",
+ jffs2_dbg(1, "%s(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n",
+ __func__,
je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
- je32_to_cpu(rd->name_crc)));
+ je32_to_cpu(rd->name_crc));
D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
- printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n");
+ pr_crit("Eep. CRC not correct in jffs2_write_dirent()\n");
BUG();
});
if (strnlen(name, namelen) != namelen) {
/* This should never happen, but seems to have done on at least one
occasion: https://dev.laptop.org/ticket/4184 */
- printk(KERN_CRIT "Error in jffs2_write_dirent() -- name contains zero bytes!\n");
- printk(KERN_CRIT "Directory inode #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x\n",
- je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
- je32_to_cpu(rd->name_crc));
+ pr_crit("Error in jffs2_write_dirent() -- name contains zero bytes!\n");
+ pr_crit("Directory inode #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x\n",
+ je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
+ je32_to_cpu(rd->name_crc));
WARN_ON(1);
return ERR_PTR(-EIO);
}
@@ -249,9 +257,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) {
BUG_ON(!retried);
- D1(printk(KERN_DEBUG "jffs2_write_dirent : dirent_version %d, "
- "highest version %d -> updating dirent\n",
- je32_to_cpu(rd->version), f->highest_version));
+ jffs2_dbg(1, "%s(): dirent_version %d, highest version %d -> updating dirent\n",
+ __func__,
+ je32_to_cpu(rd->version), f->highest_version);
rd->version = cpu_to_je32(++f->highest_version);
fd->version = je32_to_cpu(rd->version);
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
@@ -260,13 +268,14 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen,
(alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino));
if (ret || (retlen != sizeof(*rd) + namelen)) {
- printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
- sizeof(*rd)+namelen, flash_ofs, ret, retlen);
+ pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
+ sizeof(*rd) + namelen, flash_ofs, ret, retlen);
/* Mark the space as dirtied */
if (retlen) {
jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL);
} else {
- printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs);
+ pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
+ flash_ofs);
}
if (!retried) {
/* Try to reallocate space and retry */
@@ -275,7 +284,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
retried = 1;
- D1(printk(KERN_DEBUG "Retrying failed write.\n"));
+ jffs2_dbg(1, "Retrying failed write.\n");
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
@@ -295,12 +304,14 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
if (!ret) {
flash_ofs = write_ofs(c);
- D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
+ jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write\n",
+ flash_ofs);
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
goto retry;
}
- D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+ jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
+ ret);
}
/* Release the full_dnode which is now useless, and return */
jffs2_free_full_dirent(fd);
@@ -333,8 +344,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
int ret = 0;
uint32_t writtenlen = 0;
- D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n",
- f->inocache->ino, offset, writelen));
+ jffs2_dbg(1, "%s(): Ino #%u, ofs 0x%x, len 0x%x\n",
+ __func__, f->inocache->ino, offset, writelen);
while(writelen) {
struct jffs2_full_dnode *fn;
@@ -345,12 +356,13 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
int retried = 0;
retry:
- D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset));
+ jffs2_dbg(2, "jffs2_commit_write() loop: 0x%x to write to 0x%x\n",
+ writelen, offset);
ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN,
&alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
- D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
+ jffs2_dbg(1, "jffs2_reserve_space returned %d\n", ret);
break;
}
mutex_lock(&f->sem);
@@ -386,7 +398,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
if (!retried) {
/* Write error to be retried */
retried = 1;
- D1(printk(KERN_DEBUG "Retrying node write in jffs2_write_inode_range()\n"));
+ jffs2_dbg(1, "Retrying node write in jffs2_write_inode_range()\n");
goto retry;
}
break;
@@ -399,7 +411,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
}
if (ret) {
/* Eep */
- D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret));
+ jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n",
+ ret);
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
@@ -410,11 +423,11 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
if (!datalen) {
- printk(KERN_WARNING "Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
+ pr_warn("Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
ret = -EIO;
break;
}
- D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen));
+ jffs2_dbg(1, "increasing writtenlen by %d\n", datalen);
writtenlen += datalen;
offset += datalen;
writelen -= datalen;
@@ -439,7 +452,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
*/
ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL,
JFFS2_SUMMARY_INODE_SIZE);
- D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen));
+ jffs2_dbg(1, "%s(): reserved 0x%x bytes\n", __func__, alloclen);
if (ret)
return ret;
@@ -450,11 +463,11 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL);
- D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n",
- jemode_to_cpu(ri->mode)));
+ jffs2_dbg(1, "jffs2_do_create created file with mode 0x%x\n",
+ jemode_to_cpu(ri->mode));
if (IS_ERR(fn)) {
- D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n"));
+ jffs2_dbg(1, "jffs2_write_dnode() failed\n");
/* Eeek. Wave bye bye */
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
@@ -480,7 +493,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
if (ret) {
/* Eep. */
- D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n"));
+ jffs2_dbg(1, "jffs2_reserve_space() for dirent failed\n");
return ret;
}
@@ -597,8 +610,8 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
!memcmp(fd->name, name, namelen) &&
!fd->name[namelen]) {
- D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n",
- fd->ino, ref_offset(fd->raw)));
+ jffs2_dbg(1, "Marking old dirent node (ino #%u) @%08x obsolete\n",
+ fd->ino, ref_offset(fd->raw));
jffs2_mark_node_obsolete(c, fd->raw);
/* We don't want to remove it from the list immediately,
because that screws up getdents()/seek() semantics even
@@ -627,11 +640,13 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
dead_f->dents = fd->next;
if (fd->ino) {
- printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
- dead_f->inocache->ino, fd->name, fd->ino);
+ pr_warn("Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
+ dead_f->inocache->ino,
+ fd->name, fd->ino);
} else {
- D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n",
- fd->name, dead_f->inocache->ino));
+ jffs2_dbg(1, "Removing deletion dirent for \"%s\" from dir ino #%u\n",
+ fd->name,
+ dead_f->inocache->ino);
}
if (fd->raw)
jffs2_mark_node_obsolete(c, fd->raw);
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 3e93cdd19005..b55b803eddcb 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 2774e1013b34..f49b9afc4436 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -496,7 +496,7 @@ static int param_set_##name(const char *val, struct kernel_param *kp) \
__typeof__(type) num = which_strtol(val, &endp, 0); \
if (endp == val || *endp || num < (min) || num > (max)) \
return -EINVAL; \
- *((int *) kp->arg) = num; \
+ *((type *) kp->arg) = num; \
return 0; \
}
diff --git a/fs/nfsd/current_stateid.h b/fs/nfsd/current_stateid.h
new file mode 100644
index 000000000000..4123551208d8
--- /dev/null
+++ b/fs/nfsd/current_stateid.h
@@ -0,0 +1,28 @@
+#ifndef _NFSD4_CURRENT_STATE_H
+#define _NFSD4_CURRENT_STATE_H
+
+#include "state.h"
+#include "xdr4.h"
+
+extern void clear_current_stateid(struct nfsd4_compound_state *cstate);
+/*
+ * functions to set current state id
+ */
+extern void nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *);
+extern void nfsd4_set_openstateid(struct nfsd4_compound_state *, struct nfsd4_open *);
+extern void nfsd4_set_lockstateid(struct nfsd4_compound_state *, struct nfsd4_lock *);
+extern void nfsd4_set_closestateid(struct nfsd4_compound_state *, struct nfsd4_close *);
+
+/*
+ * functions to consume current state id
+ */
+extern void nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *);
+extern void nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *, struct nfsd4_delegreturn *);
+extern void nfsd4_get_freestateid(struct nfsd4_compound_state *, struct nfsd4_free_stateid *);
+extern void nfsd4_get_setattrstateid(struct nfsd4_compound_state *, struct nfsd4_setattr *);
+extern void nfsd4_get_closestateid(struct nfsd4_compound_state *, struct nfsd4_close *);
+extern void nfsd4_get_lockustateid(struct nfsd4_compound_state *, struct nfsd4_locku *);
+extern void nfsd4_get_readstateid(struct nfsd4_compound_state *, struct nfsd4_read *);
+extern void nfsd4_get_writestateid(struct nfsd4_compound_state *, struct nfsd4_write *);
+
+#endif /* _NFSD4_CURRENT_STATE_H */
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index cf8a6bd062fa..8e9689abbc0c 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -87,7 +87,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
struct svc_expkey key;
struct svc_expkey *ek = NULL;
- if (mlen < 1 || mesg[mlen-1] != '\n')
+ if (mesg[mlen - 1] != '\n')
return -EINVAL;
mesg[mlen-1] = 0;
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
new file mode 100644
index 000000000000..12e0cff435b4
--- /dev/null
+++ b/fs/nfsd/netns.h
@@ -0,0 +1,34 @@
+/*
+ * per net namespace data structures for nfsd
+ *
+ * Copyright (C) 2012, Jeff Layton <jlayton@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NFSD_NETNS_H__
+#define __NFSD_NETNS_H__
+
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+struct cld_net;
+
+struct nfsd_net {
+ struct cld_net *cld_net;
+};
+
+extern int nfsd_net_id;
+#endif /* __NFSD_NETNS_H__ */
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 0e262f32ac41..c8e9f637153a 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -645,7 +645,6 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
.timeout = &timeparms,
.program = &cb_program,
.version = 0,
- .authflavor = clp->cl_flavor,
.flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
};
struct rpc_clnt *client;
@@ -656,6 +655,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
args.client_name = clp->cl_principal;
args.prognumber = conn->cb_prog,
args.protocol = XPRT_TRANSPORT_TCP;
+ args.authflavor = clp->cl_flavor;
clp->cl_cb_ident = conn->cb_ident;
} else {
if (!conn->cb_xprt)
@@ -665,6 +665,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
args.bc_xprt = conn->cb_xprt;
args.prognumber = clp->cl_cb_session->se_cb_prog;
args.protocol = XPRT_TRANSPORT_BC_TCP;
+ args.authflavor = RPC_AUTH_UNIX;
}
/* Create RPC client */
client = rpc_create(&args);
@@ -754,9 +755,9 @@ static void do_probe_callback(struct nfs4_client *clp)
*/
void nfsd4_probe_callback(struct nfs4_client *clp)
{
- /* XXX: atomicity? Also, should we be using cl_cb_flags? */
+ /* XXX: atomicity? Also, should we be using cl_flags? */
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
- set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
+ set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
do_probe_callback(clp);
}
@@ -915,7 +916,7 @@ void nfsd4_destroy_callback_queue(void)
/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
- set_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags);
+ set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
/*
* Note this won't actually result in a null callback;
* instead, nfsd4_do_callback_rpc() will detect the killed
@@ -966,15 +967,15 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
clp->cl_cb_conn.cb_xprt = NULL;
}
- if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
+ if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
return;
spin_lock(&clp->cl_lock);
/*
* Only serialized callback code is allowed to clear these
* flags; main nfsd code can only set them:
*/
- BUG_ON(!clp->cl_cb_flags);
- clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
+ BUG_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
+ clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
c = __nfsd4_find_backchannel(clp);
if (c) {
@@ -986,7 +987,7 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
err = setup_callback_client(clp, &conn, ses);
if (err) {
- warn_no_callback_path(clp, err);
+ nfsd4_mark_cb_down(clp, err);
return;
}
/* Yay, the callback channel's back! Restart any callbacks: */
@@ -1000,7 +1001,7 @@ void nfsd4_do_callback_rpc(struct work_struct *w)
struct nfs4_client *clp = cb->cb_clp;
struct rpc_clnt *clnt;
- if (clp->cl_cb_flags)
+ if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
nfsd4_process_cb_update(cb);
clnt = clp->cl_cb_client;
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 94096273cd6c..322d11ce06a4 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -41,6 +41,14 @@
#include "nfsd.h"
/*
+ * Turn off idmapping when using AUTH_SYS.
+ */
+static bool nfs4_disable_idmapping = true;
+module_param(nfs4_disable_idmapping, bool, 0644);
+MODULE_PARM_DESC(nfs4_disable_idmapping,
+ "Turn off server's NFSv4 idmapping when using 'sec=sys'");
+
+/*
* Cache entry
*/
@@ -561,28 +569,65 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
return ret;
}
+static bool
+numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id)
+{
+ int ret;
+ char buf[11];
+
+ if (namelen + 1 > sizeof(buf))
+ /* too long to represent a 32-bit id: */
+ return false;
+ /* Just to make sure it's null-terminated: */
+ memcpy(buf, name, namelen);
+ buf[namelen] = '\0';
+ ret = kstrtouint(buf, 10, id);
+ return ret == 0;
+}
+
+static __be32
+do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id)
+{
+ if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS)
+ if (numeric_name_to_id(rqstp, type, name, namelen, id))
+ return 0;
+ /*
+ * otherwise, fall through and try idmapping, for
+ * backwards compatibility with clients sending names:
+ */
+ return idmap_name_to_id(rqstp, type, name, namelen, id);
+}
+
+static int
+do_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
+{
+ if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS)
+ return sprintf(name, "%u", id);
+ return idmap_id_to_name(rqstp, type, id, name);
+}
+
__be32
nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen,
__u32 *id)
{
- return idmap_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id);
+ return do_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id);
}
__be32
nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen,
__u32 *id)
{
- return idmap_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, id);
+ return do_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, id);
}
int
nfsd_map_uid_to_name(struct svc_rqst *rqstp, __u32 id, char *name)
{
- return idmap_id_to_name(rqstp, IDMAP_TYPE_USER, id, name);
+ return do_id_to_name(rqstp, IDMAP_TYPE_USER, id, name);
}
int
nfsd_map_gid_to_name(struct svc_rqst *rqstp, __u32 id, char *name)
{
- return idmap_id_to_name(rqstp, IDMAP_TYPE_GROUP, id, name);
+ return do_id_to_name(rqstp, IDMAP_TYPE_GROUP, id, name);
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 896da74ec563..2ed14dfd00a2 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -39,6 +39,7 @@
#include "cache.h"
#include "xdr4.h"
#include "vfs.h"
+#include "current_stateid.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -192,10 +193,13 @@ static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
static __be32
do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
- struct svc_fh resfh;
+ struct svc_fh *resfh;
__be32 status;
- fh_init(&resfh, NFS4_FHSIZE);
+ resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
+ if (!resfh)
+ return nfserr_jukebox;
+ fh_init(resfh, NFS4_FHSIZE);
open->op_truncate = 0;
if (open->op_create) {
@@ -220,7 +224,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
*/
status = do_nfsd_create(rqstp, current_fh, open->op_fname.data,
open->op_fname.len, &open->op_iattr,
- &resfh, open->op_createmode,
+ resfh, open->op_createmode,
(u32 *)open->op_verf.data,
&open->op_truncate, &open->op_created);
@@ -234,30 +238,29 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
FATTR4_WORD1_TIME_MODIFY);
} else {
status = nfsd_lookup(rqstp, current_fh,
- open->op_fname.data, open->op_fname.len, &resfh);
+ open->op_fname.data, open->op_fname.len, resfh);
fh_unlock(current_fh);
if (status)
goto out;
- status = nfsd_check_obj_isreg(&resfh);
+ status = nfsd_check_obj_isreg(resfh);
}
if (status)
goto out;
if (is_create_with_attrs(open) && open->op_acl != NULL)
- do_set_nfs4_acl(rqstp, &resfh, open->op_acl, open->op_bmval);
-
- set_change_info(&open->op_cinfo, current_fh);
- fh_dup2(current_fh, &resfh);
+ do_set_nfs4_acl(rqstp, resfh, open->op_acl, open->op_bmval);
/* set reply cache */
fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
- &resfh.fh_handle);
+ &resfh->fh_handle);
if (!open->op_created)
- status = do_open_permission(rqstp, current_fh, open,
+ status = do_open_permission(rqstp, resfh, open,
NFSD_MAY_NOP);
-
+ set_change_info(&open->op_cinfo, current_fh);
+ fh_dup2(current_fh, resfh);
out:
- fh_put(&resfh);
+ fh_put(resfh);
+ kfree(resfh);
return status;
}
@@ -310,16 +313,14 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
return nfserr_inval;
- /* We don't yet support WANT bits: */
- open->op_share_access &= NFS4_SHARE_ACCESS_MASK;
-
open->op_created = 0;
/*
* RFC5661 18.51.3
* Before RECLAIM_COMPLETE done, server should deny new lock
*/
if (nfsd4_has_session(cstate) &&
- !cstate->session->se_client->cl_firststate &&
+ !test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
+ &cstate->session->se_client->cl_flags) &&
open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
return nfserr_grace;
@@ -452,6 +453,10 @@ nfsd4_restorefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_restorefh;
fh_dup2(&cstate->current_fh, &cstate->save_fh);
+ if (HAS_STATE_ID(cstate, SAVED_STATE_ID_FLAG)) {
+ memcpy(&cstate->current_stateid, &cstate->save_stateid, sizeof(stateid_t));
+ SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
+ }
return nfs_ok;
}
@@ -463,6 +468,10 @@ nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_nofilehandle;
fh_dup2(&cstate->save_fh, &cstate->current_fh);
+ if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG)) {
+ memcpy(&cstate->save_stateid, &cstate->current_stateid, sizeof(stateid_t));
+ SET_STATE_ID(cstate, SAVED_STATE_ID_FLAG);
+ }
return nfs_ok;
}
@@ -481,14 +490,20 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&access->ac_supported);
}
+static void gen_boot_verifier(nfs4_verifier *verifier)
+{
+ __be32 verf[2];
+
+ verf[0] = (__be32)nfssvc_boot.tv_sec;
+ verf[1] = (__be32)nfssvc_boot.tv_usec;
+ memcpy(verifier->data, verf, sizeof(verifier->data));
+}
+
static __be32
nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_commit *commit)
{
- u32 *p = (u32 *)commit->co_verf.data;
- *p++ = nfssvc_boot.tv_sec;
- *p++ = nfssvc_boot.tv_usec;
-
+ gen_boot_verifier(&commit->co_verf);
return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
commit->co_count);
}
@@ -865,7 +880,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
stateid_t *stateid = &write->wr_stateid;
struct file *filp = NULL;
- u32 *p;
__be32 status = nfs_ok;
unsigned long cnt;
@@ -887,9 +901,7 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
cnt = write->wr_buflen;
write->wr_how_written = write->wr_stable_how;
- p = (u32 *)write->wr_verifier.data;
- *p++ = nfssvc_boot.tv_sec;
- *p++ = nfssvc_boot.tv_usec;
+ gen_boot_verifier(&write->wr_verifier);
status = nfsd_write(rqstp, &cstate->current_fh, filp,
write->wr_offset, rqstp->rq_vec, write->wr_vlen,
@@ -1000,6 +1012,8 @@ static inline void nfsd4_increment_op_stats(u32 opnum)
typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
void *);
typedef u32(*nfsd4op_rsize)(struct svc_rqst *, struct nfsd4_op *op);
+typedef void(*stateid_setter)(struct nfsd4_compound_state *, void *);
+typedef void(*stateid_getter)(struct nfsd4_compound_state *, void *);
enum nfsd4_op_flags {
ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
@@ -1025,6 +1039,10 @@ enum nfsd4_op_flags {
* the v4.0 case).
*/
OP_CACHEME = 1 << 6,
+ /*
+ * These are ops which clear current state id.
+ */
+ OP_CLEAR_STATEID = 1 << 7,
};
struct nfsd4_operation {
@@ -1033,11 +1051,15 @@ struct nfsd4_operation {
char *op_name;
/* Try to get response size before operation */
nfsd4op_rsize op_rsize_bop;
+ stateid_getter op_get_currentstateid;
+ stateid_setter op_set_currentstateid;
};
static struct nfsd4_operation nfsd4_ops[];
+#ifdef NFSD_DEBUG
static const char *nfsd4_op_name(unsigned opnum);
+#endif
/*
* Enforce NFSv4.1 COMPOUND ordering rules:
@@ -1215,13 +1237,23 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
if (op->status)
goto encode_op;
- if (opdesc->op_func)
+ if (opdesc->op_func) {
+ if (opdesc->op_get_currentstateid)
+ opdesc->op_get_currentstateid(cstate, &op->u);
op->status = opdesc->op_func(rqstp, cstate, &op->u);
- else
+ } else
BUG_ON(op->status == nfs_ok);
- if (!op->status && need_wrongsec_check(rqstp))
- op->status = check_nfsd_access(cstate->current_fh.fh_export, rqstp);
+ if (!op->status) {
+ if (opdesc->op_set_currentstateid)
+ opdesc->op_set_currentstateid(cstate, &op->u);
+
+ if (opdesc->op_flags & OP_CLEAR_STATEID)
+ clear_current_stateid(cstate);
+
+ if (need_wrongsec_check(rqstp))
+ op->status = check_nfsd_access(cstate->current_fh.fh_export, rqstp);
+ }
encode_op:
/* Only from SEQUENCE */
@@ -1413,6 +1445,8 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_CLOSE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
+ .op_get_currentstateid = (stateid_getter)nfsd4_get_closestateid,
+ .op_set_currentstateid = (stateid_setter)nfsd4_set_closestateid,
},
[OP_COMMIT] = {
.op_func = (nfsd4op_func)nfsd4_commit,
@@ -1422,7 +1456,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_CREATE] = {
.op_func = (nfsd4op_func)nfsd4_create,
- .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+ .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME | OP_CLEAR_STATEID,
.op_name = "OP_CREATE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_create_rsize,
},
@@ -1431,6 +1465,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_DELEGRETURN",
.op_rsize_bop = nfsd4_only_status_rsize,
+ .op_get_currentstateid = (stateid_getter)nfsd4_get_delegreturnstateid,
},
[OP_GETATTR] = {
.op_func = (nfsd4op_func)nfsd4_getattr,
@@ -1453,6 +1488,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCK",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize,
+ .op_set_currentstateid = (stateid_setter)nfsd4_set_lockstateid,
},
[OP_LOCKT] = {
.op_func = (nfsd4op_func)nfsd4_lockt,
@@ -1463,15 +1499,16 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCKU",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
+ .op_get_currentstateid = (stateid_getter)nfsd4_get_lockustateid,
},
[OP_LOOKUP] = {
.op_func = (nfsd4op_func)nfsd4_lookup,
- .op_flags = OP_HANDLES_WRONGSEC,
+ .op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
.op_name = "OP_LOOKUP",
},
[OP_LOOKUPP] = {
.op_func = (nfsd4op_func)nfsd4_lookupp,
- .op_flags = OP_HANDLES_WRONGSEC,
+ .op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
.op_name = "OP_LOOKUPP",
},
[OP_NVERIFY] = {
@@ -1483,6 +1520,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_open_rsize,
+ .op_set_currentstateid = (stateid_setter)nfsd4_set_openstateid,
},
[OP_OPEN_CONFIRM] = {
.op_func = (nfsd4op_func)nfsd4_open_confirm,
@@ -1495,25 +1533,30 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_DOWNGRADE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
+ .op_get_currentstateid = (stateid_getter)nfsd4_get_opendowngradestateid,
+ .op_set_currentstateid = (stateid_setter)nfsd4_set_opendowngradestateid,
},
[OP_PUTFH] = {
.op_func = (nfsd4op_func)nfsd4_putfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
- | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
+ | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING
+ | OP_CLEAR_STATEID,
.op_name = "OP_PUTFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_PUTPUBFH] = {
.op_func = (nfsd4op_func)nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
- | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
+ | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING
+ | OP_CLEAR_STATEID,
.op_name = "OP_PUTPUBFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_PUTROOTFH] = {
.op_func = (nfsd4op_func)nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
- | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
+ | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING
+ | OP_CLEAR_STATEID,
.op_name = "OP_PUTROOTFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
@@ -1522,6 +1565,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_READ",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_read_rsize,
+ .op_get_currentstateid = (stateid_getter)nfsd4_get_readstateid,
},
[OP_READDIR] = {
.op_func = (nfsd4op_func)nfsd4_readdir,
@@ -1576,6 +1620,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_name = "OP_SETATTR",
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_setattr_rsize,
+ .op_get_currentstateid = (stateid_getter)nfsd4_get_setattrstateid,
},
[OP_SETCLIENTID] = {
.op_func = (nfsd4op_func)nfsd4_setclientid,
@@ -1600,6 +1645,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_WRITE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
+ .op_get_currentstateid = (stateid_getter)nfsd4_get_writestateid,
},
[OP_RELEASE_LOCKOWNER] = {
.op_func = (nfsd4op_func)nfsd4_release_lockowner,
@@ -1674,12 +1720,14 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
};
+#ifdef NFSD_DEBUG
static const char *nfsd4_op_name(unsigned opnum)
{
if (opnum < ARRAY_SIZE(nfsd4_ops))
return nfsd4_ops[opnum].op_name;
return "unknown_operation";
}
+#endif
#define nfsd4_voidres nfsd4_voidargs
struct nfsd4_voidargs { int dummy; };
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 0b3e875d1abd..4767429264a2 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004 The Regents of the University of Michigan.
+* Copyright (c) 2012 Jeff Layton <jlayton@redhat.com>
* All rights reserved.
*
* Andy Adamson <andros@citi.umich.edu>
@@ -36,16 +37,34 @@
#include <linux/namei.h>
#include <linux/crypto.h>
#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <net/net_namespace.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfsd/cld.h>
#include "nfsd.h"
#include "state.h"
#include "vfs.h"
+#include "netns.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
+/* Declarations */
+struct nfsd4_client_tracking_ops {
+ int (*init)(struct net *);
+ void (*exit)(struct net *);
+ void (*create)(struct nfs4_client *);
+ void (*remove)(struct nfs4_client *);
+ int (*check)(struct nfs4_client *);
+ void (*grace_done)(struct net *, time_t);
+};
+
/* Globals */
static struct file *rec_file;
static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
+static struct nfsd4_client_tracking_ops *client_tracking_ops;
static int
nfs4_save_creds(const struct cred **original_creds)
@@ -117,7 +136,8 @@ out_no_tfm:
return status;
}
-void nfsd4_create_clid_dir(struct nfs4_client *clp)
+static void
+nfsd4_create_clid_dir(struct nfs4_client *clp)
{
const struct cred *original_cred;
char *dname = clp->cl_recdir;
@@ -126,9 +146,8 @@ void nfsd4_create_clid_dir(struct nfs4_client *clp)
dprintk("NFSD: nfsd4_create_clid_dir for \"%s\"\n", dname);
- if (clp->cl_firststate)
+ if (test_and_set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
- clp->cl_firststate = 1;
if (!rec_file)
return;
status = nfs4_save_creds(&original_cred);
@@ -265,19 +284,19 @@ out_unlock:
return status;
}
-void
+static void
nfsd4_remove_clid_dir(struct nfs4_client *clp)
{
const struct cred *original_cred;
int status;
- if (!rec_file || !clp->cl_firststate)
+ if (!rec_file || !test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
status = mnt_want_write_file(rec_file);
if (status)
goto out;
- clp->cl_firststate = 0;
+ clear_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
status = nfs4_save_creds(&original_cred);
if (status < 0)
@@ -292,7 +311,6 @@ out:
if (status)
printk("NFSD: Failed to remove expired client state directory"
" %.*s\n", HEXDIR_LEN, clp->cl_recdir);
- return;
}
static int
@@ -311,8 +329,9 @@ purge_old(struct dentry *parent, struct dentry *child)
return 0;
}
-void
-nfsd4_recdir_purge_old(void) {
+static void
+nfsd4_recdir_purge_old(struct net *net, time_t boot_time)
+{
int status;
if (!rec_file)
@@ -343,7 +362,7 @@ load_recdir(struct dentry *parent, struct dentry *child)
return 0;
}
-int
+static int
nfsd4_recdir_load(void) {
int status;
@@ -361,8 +380,8 @@ nfsd4_recdir_load(void) {
* Hold reference to the recovery directory.
*/
-void
-nfsd4_init_recdir()
+static int
+nfsd4_init_recdir(void)
{
const struct cred *original_cred;
int status;
@@ -377,20 +396,44 @@ nfsd4_init_recdir()
printk("NFSD: Unable to change credentials to find recovery"
" directory: error %d\n",
status);
- return;
+ return status;
}
rec_file = filp_open(user_recovery_dirname, O_RDONLY | O_DIRECTORY, 0);
if (IS_ERR(rec_file)) {
printk("NFSD: unable to find recovery directory %s\n",
user_recovery_dirname);
+ status = PTR_ERR(rec_file);
rec_file = NULL;
}
nfs4_reset_creds(original_cred);
+ return status;
}
-void
+static int
+nfsd4_load_reboot_recovery_data(struct net *net)
+{
+ int status;
+
+ /* XXX: The legacy code won't work in a container */
+ if (net != &init_net) {
+ WARN(1, KERN_ERR "NFSD: attempt to initialize legacy client "
+ "tracking in a container!\n");
+ return -EINVAL;
+ }
+
+ nfs4_lock_state();
+ status = nfsd4_init_recdir();
+ if (!status)
+ status = nfsd4_recdir_load();
+ nfs4_unlock_state();
+ if (status)
+ printk(KERN_ERR "NFSD: Failure reading reboot recovery data\n");
+ return status;
+}
+
+static void
nfsd4_shutdown_recdir(void)
{
if (!rec_file)
@@ -399,6 +442,13 @@ nfsd4_shutdown_recdir(void)
rec_file = NULL;
}
+static void
+nfsd4_legacy_tracking_exit(struct net *net)
+{
+ nfs4_release_reclaim();
+ nfsd4_shutdown_recdir();
+}
+
/*
* Change the NFSv4 recovery directory to recdir.
*/
@@ -425,3 +475,572 @@ nfs4_recoverydir(void)
{
return user_recovery_dirname;
}
+
+static int
+nfsd4_check_legacy_client(struct nfs4_client *clp)
+{
+ /* did we already find that this client is stable? */
+ if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
+ return 0;
+
+ /* look for it in the reclaim hashtable otherwise */
+ if (nfsd4_find_reclaim_client(clp)) {
+ set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static struct nfsd4_client_tracking_ops nfsd4_legacy_tracking_ops = {
+ .init = nfsd4_load_reboot_recovery_data,
+ .exit = nfsd4_legacy_tracking_exit,
+ .create = nfsd4_create_clid_dir,
+ .remove = nfsd4_remove_clid_dir,
+ .check = nfsd4_check_legacy_client,
+ .grace_done = nfsd4_recdir_purge_old,
+};
+
+/* Globals */
+#define NFSD_PIPE_DIR "nfsd"
+#define NFSD_CLD_PIPE "cld"
+
+/* per-net-ns structure for holding cld upcall info */
+struct cld_net {
+ struct rpc_pipe *cn_pipe;
+ spinlock_t cn_lock;
+ struct list_head cn_list;
+ unsigned int cn_xid;
+};
+
+struct cld_upcall {
+ struct list_head cu_list;
+ struct cld_net *cu_net;
+ struct task_struct *cu_task;
+ struct cld_msg cu_msg;
+};
+
+static int
+__cld_pipe_upcall(struct rpc_pipe *pipe, struct cld_msg *cmsg)
+{
+ int ret;
+ struct rpc_pipe_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.data = cmsg;
+ msg.len = sizeof(*cmsg);
+
+ /*
+ * Set task state before we queue the upcall. That prevents
+ * wake_up_process in the downcall from racing with schedule.
+ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ ret = rpc_queue_upcall(pipe, &msg);
+ if (ret < 0) {
+ set_current_state(TASK_RUNNING);
+ goto out;
+ }
+
+ schedule();
+ set_current_state(TASK_RUNNING);
+
+ if (msg.errno < 0)
+ ret = msg.errno;
+out:
+ return ret;
+}
+
+static int
+cld_pipe_upcall(struct rpc_pipe *pipe, struct cld_msg *cmsg)
+{
+ int ret;
+
+ /*
+ * -EAGAIN occurs when pipe is closed and reopened while there are
+ * upcalls queued.
+ */
+ do {
+ ret = __cld_pipe_upcall(pipe, cmsg);
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
+
+static ssize_t
+cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+{
+ struct cld_upcall *tmp, *cup;
+ struct cld_msg *cmsg = (struct cld_msg *)src;
+ uint32_t xid;
+ struct nfsd_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info,
+ nfsd_net_id);
+ struct cld_net *cn = nn->cld_net;
+
+ if (mlen != sizeof(*cmsg)) {
+ dprintk("%s: got %lu bytes, expected %lu\n", __func__, mlen,
+ sizeof(*cmsg));
+ return -EINVAL;
+ }
+
+ /* copy just the xid so we can try to find that */
+ if (copy_from_user(&xid, &cmsg->cm_xid, sizeof(xid)) != 0) {
+ dprintk("%s: error when copying xid from userspace", __func__);
+ return -EFAULT;
+ }
+
+ /* walk the list and find corresponding xid */
+ cup = NULL;
+ spin_lock(&cn->cn_lock);
+ list_for_each_entry(tmp, &cn->cn_list, cu_list) {
+ if (get_unaligned(&tmp->cu_msg.cm_xid) == xid) {
+ cup = tmp;
+ list_del_init(&cup->cu_list);
+ break;
+ }
+ }
+ spin_unlock(&cn->cn_lock);
+
+ /* couldn't find upcall? */
+ if (!cup) {
+ dprintk("%s: couldn't find upcall -- xid=%u\n", __func__, xid);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&cup->cu_msg, src, mlen) != 0)
+ return -EFAULT;
+
+ wake_up_process(cup->cu_task);
+ return mlen;
+}
+
+static void
+cld_pipe_destroy_msg(struct rpc_pipe_msg *msg)
+{
+ struct cld_msg *cmsg = msg->data;
+ struct cld_upcall *cup = container_of(cmsg, struct cld_upcall,
+ cu_msg);
+
+ /* errno >= 0 means we got a downcall */
+ if (msg->errno >= 0)
+ return;
+
+ wake_up_process(cup->cu_task);
+}
+
+static const struct rpc_pipe_ops cld_upcall_ops = {
+ .upcall = rpc_pipe_generic_upcall,
+ .downcall = cld_pipe_downcall,
+ .destroy_msg = cld_pipe_destroy_msg,
+};
+
+static struct dentry *
+nfsd4_cld_register_sb(struct super_block *sb, struct rpc_pipe *pipe)
+{
+ struct dentry *dir, *dentry;
+
+ dir = rpc_d_lookup_sb(sb, NFSD_PIPE_DIR);
+ if (dir == NULL)
+ return ERR_PTR(-ENOENT);
+ dentry = rpc_mkpipe_dentry(dir, NFSD_CLD_PIPE, NULL, pipe);
+ dput(dir);
+ return dentry;
+}
+
+static void
+nfsd4_cld_unregister_sb(struct rpc_pipe *pipe)
+{
+ if (pipe->dentry)
+ rpc_unlink(pipe->dentry);
+}
+
+static struct dentry *
+nfsd4_cld_register_net(struct net *net, struct rpc_pipe *pipe)
+{
+ struct super_block *sb;
+ struct dentry *dentry;
+
+ sb = rpc_get_sb_net(net);
+ if (!sb)
+ return NULL;
+ dentry = nfsd4_cld_register_sb(sb, pipe);
+ rpc_put_sb_net(net);
+ return dentry;
+}
+
+static void
+nfsd4_cld_unregister_net(struct net *net, struct rpc_pipe *pipe)
+{
+ struct super_block *sb;
+
+ sb = rpc_get_sb_net(net);
+ if (sb) {
+ nfsd4_cld_unregister_sb(pipe);
+ rpc_put_sb_net(net);
+ }
+}
+
+/* Initialize rpc_pipefs pipe for communication with client tracking daemon */
+static int
+nfsd4_init_cld_pipe(struct net *net)
+{
+ int ret;
+ struct dentry *dentry;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct cld_net *cn;
+
+ if (nn->cld_net)
+ return 0;
+
+ cn = kzalloc(sizeof(*cn), GFP_KERNEL);
+ if (!cn) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ cn->cn_pipe = rpc_mkpipe_data(&cld_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
+ if (IS_ERR(cn->cn_pipe)) {
+ ret = PTR_ERR(cn->cn_pipe);
+ goto err;
+ }
+ spin_lock_init(&cn->cn_lock);
+ INIT_LIST_HEAD(&cn->cn_list);
+
+ dentry = nfsd4_cld_register_net(net, cn->cn_pipe);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto err_destroy_data;
+ }
+
+ cn->cn_pipe->dentry = dentry;
+ nn->cld_net = cn;
+ return 0;
+
+err_destroy_data:
+ rpc_destroy_pipe_data(cn->cn_pipe);
+err:
+ kfree(cn);
+ printk(KERN_ERR "NFSD: unable to create nfsdcld upcall pipe (%d)\n",
+ ret);
+ return ret;
+}
+
+static void
+nfsd4_remove_cld_pipe(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct cld_net *cn = nn->cld_net;
+
+ nfsd4_cld_unregister_net(net, cn->cn_pipe);
+ rpc_destroy_pipe_data(cn->cn_pipe);
+ kfree(nn->cld_net);
+ nn->cld_net = NULL;
+}
+
+static struct cld_upcall *
+alloc_cld_upcall(struct cld_net *cn)
+{
+ struct cld_upcall *new, *tmp;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return new;
+
+ /* FIXME: hard cap on number in flight? */
+restart_search:
+ spin_lock(&cn->cn_lock);
+ list_for_each_entry(tmp, &cn->cn_list, cu_list) {
+ if (tmp->cu_msg.cm_xid == cn->cn_xid) {
+ cn->cn_xid++;
+ spin_unlock(&cn->cn_lock);
+ goto restart_search;
+ }
+ }
+ new->cu_task = current;
+ new->cu_msg.cm_vers = CLD_UPCALL_VERSION;
+ put_unaligned(cn->cn_xid++, &new->cu_msg.cm_xid);
+ new->cu_net = cn;
+ list_add(&new->cu_list, &cn->cn_list);
+ spin_unlock(&cn->cn_lock);
+
+ dprintk("%s: allocated xid %u\n", __func__, new->cu_msg.cm_xid);
+
+ return new;
+}
+
+static void
+free_cld_upcall(struct cld_upcall *victim)
+{
+ struct cld_net *cn = victim->cu_net;
+
+ spin_lock(&cn->cn_lock);
+ list_del(&victim->cu_list);
+ spin_unlock(&cn->cn_lock);
+ kfree(victim);
+}
+
+/* Ask daemon to create a new record */
+static void
+nfsd4_cld_create(struct nfs4_client *clp)
+{
+ int ret;
+ struct cld_upcall *cup;
+ /* FIXME: determine net from clp */
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct cld_net *cn = nn->cld_net;
+
+ /* Don't upcall if it's already stored */
+ if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
+ return;
+
+ cup = alloc_cld_upcall(cn);
+ if (!cup) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ cup->cu_msg.cm_cmd = Cld_Create;
+ cup->cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
+ memcpy(cup->cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
+ clp->cl_name.len);
+
+ ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
+ if (!ret) {
+ ret = cup->cu_msg.cm_status;
+ set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
+ }
+
+ free_cld_upcall(cup);
+out_err:
+ if (ret)
+ printk(KERN_ERR "NFSD: Unable to create client "
+ "record on stable storage: %d\n", ret);
+}
+
+/* Ask daemon to remove a record */
+static void
+nfsd4_cld_remove(struct nfs4_client *clp)
+{
+ int ret;
+ struct cld_upcall *cup;
+ /* FIXME: determine net from clp */
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct cld_net *cn = nn->cld_net;
+
+ /* Don't upcall if it's already removed */
+ if (!test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
+ return;
+
+ cup = alloc_cld_upcall(cn);
+ if (!cup) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ cup->cu_msg.cm_cmd = Cld_Remove;
+ cup->cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
+ memcpy(cup->cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
+ clp->cl_name.len);
+
+ ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
+ if (!ret) {
+ ret = cup->cu_msg.cm_status;
+ clear_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
+ }
+
+ free_cld_upcall(cup);
+out_err:
+ if (ret)
+ printk(KERN_ERR "NFSD: Unable to remove client "
+ "record from stable storage: %d\n", ret);
+}
+
+/* Check for presence of a record, and update its timestamp */
+static int
+nfsd4_cld_check(struct nfs4_client *clp)
+{
+ int ret;
+ struct cld_upcall *cup;
+ /* FIXME: determine net from clp */
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct cld_net *cn = nn->cld_net;
+
+ /* Don't upcall if one was already stored during this grace period */
+ if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
+ return 0;
+
+ cup = alloc_cld_upcall(cn);
+ if (!cup) {
+ printk(KERN_ERR "NFSD: Unable to check client record on "
+ "stable storage: %d\n", -ENOMEM);
+ return -ENOMEM;
+ }
+
+ cup->cu_msg.cm_cmd = Cld_Check;
+ cup->cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
+ memcpy(cup->cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
+ clp->cl_name.len);
+
+ ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
+ if (!ret) {
+ ret = cup->cu_msg.cm_status;
+ set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
+ }
+
+ free_cld_upcall(cup);
+ return ret;
+}
+
+static void
+nfsd4_cld_grace_done(struct net *net, time_t boot_time)
+{
+ int ret;
+ struct cld_upcall *cup;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct cld_net *cn = nn->cld_net;
+
+ cup = alloc_cld_upcall(cn);
+ if (!cup) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ cup->cu_msg.cm_cmd = Cld_GraceDone;
+ cup->cu_msg.cm_u.cm_gracetime = (int64_t)boot_time;
+ ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
+ if (!ret)
+ ret = cup->cu_msg.cm_status;
+
+ free_cld_upcall(cup);
+out_err:
+ if (ret)
+ printk(KERN_ERR "NFSD: Unable to end grace period: %d\n", ret);
+}
+
+static struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops = {
+ .init = nfsd4_init_cld_pipe,
+ .exit = nfsd4_remove_cld_pipe,
+ .create = nfsd4_cld_create,
+ .remove = nfsd4_cld_remove,
+ .check = nfsd4_cld_check,
+ .grace_done = nfsd4_cld_grace_done,
+};
+
+int
+nfsd4_client_tracking_init(struct net *net)
+{
+ int status;
+ struct path path;
+
+ if (!client_tracking_ops) {
+ client_tracking_ops = &nfsd4_cld_tracking_ops;
+ status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
+ if (!status) {
+ if (S_ISDIR(path.dentry->d_inode->i_mode))
+ client_tracking_ops =
+ &nfsd4_legacy_tracking_ops;
+ path_put(&path);
+ }
+ }
+
+ status = client_tracking_ops->init(net);
+ if (status) {
+ printk(KERN_WARNING "NFSD: Unable to initialize client "
+ "recovery tracking! (%d)\n", status);
+ client_tracking_ops = NULL;
+ }
+ return status;
+}
+
+void
+nfsd4_client_tracking_exit(struct net *net)
+{
+ if (client_tracking_ops) {
+ client_tracking_ops->exit(net);
+ client_tracking_ops = NULL;
+ }
+}
+
+void
+nfsd4_client_record_create(struct nfs4_client *clp)
+{
+ if (client_tracking_ops)
+ client_tracking_ops->create(clp);
+}
+
+void
+nfsd4_client_record_remove(struct nfs4_client *clp)
+{
+ if (client_tracking_ops)
+ client_tracking_ops->remove(clp);
+}
+
+int
+nfsd4_client_record_check(struct nfs4_client *clp)
+{
+ if (client_tracking_ops)
+ return client_tracking_ops->check(clp);
+
+ return -EOPNOTSUPP;
+}
+
+void
+nfsd4_record_grace_done(struct net *net, time_t boot_time)
+{
+ if (client_tracking_ops)
+ client_tracking_ops->grace_done(net, boot_time);
+}
+
+static int
+rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct super_block *sb = ptr;
+ struct net *net = sb->s_fs_info;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct cld_net *cn = nn->cld_net;
+ struct dentry *dentry;
+ int ret = 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return 0;
+
+ if (!cn) {
+ module_put(THIS_MODULE);
+ return 0;
+ }
+
+ switch (event) {
+ case RPC_PIPEFS_MOUNT:
+ dentry = nfsd4_cld_register_sb(sb, cn->cn_pipe);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ break;
+ }
+ cn->cn_pipe->dentry = dentry;
+ break;
+ case RPC_PIPEFS_UMOUNT:
+ if (cn->cn_pipe->dentry)
+ nfsd4_cld_unregister_sb(cn->cn_pipe);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+struct notifier_block nfsd4_cld_block = {
+ .notifier_call = rpc_pipefs_event,
+};
+
+int
+register_cld_notifier(void)
+{
+ return rpc_pipefs_notifier_register(&nfsd4_cld_block);
+}
+
+void
+unregister_cld_notifier(void)
+{
+ rpc_pipefs_notifier_unregister(&nfsd4_cld_block);
+}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c5cddd659429..1841f8bf845e 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -58,11 +58,15 @@ static const stateid_t one_stateid = {
static const stateid_t zero_stateid = {
/* all fields zero */
};
+static const stateid_t currentstateid = {
+ .si_generation = 1,
+};
static u64 current_sessionid = 1;
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
+#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
/* forward declarations */
static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
@@ -91,6 +95,19 @@ nfs4_lock_state(void)
mutex_lock(&client_mutex);
}
+static void free_session(struct kref *);
+
+/* Must be called under the client_lock */
+static void nfsd4_put_session_locked(struct nfsd4_session *ses)
+{
+ kref_put(&ses->se_ref, free_session);
+}
+
+static void nfsd4_get_session(struct nfsd4_session *ses)
+{
+ kref_get(&ses->se_ref);
+}
+
void
nfs4_unlock_state(void)
{
@@ -605,12 +622,20 @@ hash_sessionid(struct nfs4_sessionid *sessionid)
return sid->sequence % SESSION_HASH_SIZE;
}
+#ifdef NFSD_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
u32 *ptr = (u32 *)(&sessionid->data[0]);
dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
+#else
+static inline void
+dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
+{
+}
+#endif
+
static void
gen_sessionid(struct nfsd4_session *ses)
@@ -832,11 +857,12 @@ static void nfsd4_del_conns(struct nfsd4_session *s)
spin_unlock(&clp->cl_lock);
}
-void free_session(struct kref *kref)
+static void free_session(struct kref *kref)
{
struct nfsd4_session *ses;
int mem;
+ BUG_ON(!spin_is_locked(&client_lock));
ses = container_of(kref, struct nfsd4_session, se_ref);
nfsd4_del_conns(ses);
spin_lock(&nfsd_drc_lock);
@@ -847,6 +873,13 @@ void free_session(struct kref *kref)
kfree(ses);
}
+void nfsd4_put_session(struct nfsd4_session *ses)
+{
+ spin_lock(&client_lock);
+ nfsd4_put_session_locked(ses);
+ spin_unlock(&client_lock);
+}
+
static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
struct nfsd4_session *new;
@@ -894,7 +927,9 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n
status = nfsd4_new_conn_from_crses(rqstp, new);
/* whoops: benny points out, status is ignored! (err, or bogus) */
if (status) {
+ spin_lock(&client_lock);
free_session(&new->se_ref);
+ spin_unlock(&client_lock);
return NULL;
}
if (cses->flags & SESSION4_BACK_CHAN) {
@@ -1006,12 +1041,13 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
static inline void
free_client(struct nfs4_client *clp)
{
+ BUG_ON(!spin_is_locked(&client_lock));
while (!list_empty(&clp->cl_sessions)) {
struct nfsd4_session *ses;
ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
se_perclnt);
list_del(&ses->se_perclnt);
- nfsd4_put_session(ses);
+ nfsd4_put_session_locked(ses);
}
if (clp->cl_cred.cr_group_info)
put_group_info(clp->cl_cred.cr_group_info);
@@ -1138,12 +1174,12 @@ static void gen_clid(struct nfs4_client *clp)
static void gen_confirm(struct nfs4_client *clp)
{
+ __be32 verf[2];
static u32 i;
- u32 *p;
- p = (u32 *)clp->cl_confirm.data;
- *p++ = get_seconds();
- *p++ = i++;
+ verf[0] = (__be32)get_seconds();
+ verf[1] = (__be32)i++;
+ memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}
static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
@@ -1180,7 +1216,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
if (princ) {
clp->cl_principal = kstrdup(princ, GFP_KERNEL);
if (clp->cl_principal == NULL) {
+ spin_lock(&client_lock);
free_client(clp);
+ spin_unlock(&client_lock);
return NULL;
}
}
@@ -1347,6 +1385,7 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
slot->sl_opcnt = resp->opcnt;
slot->sl_status = resp->cstate.status;
+ slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
if (nfsd4_not_cached(resp)) {
slot->sl_datalen = 0;
return;
@@ -1374,15 +1413,12 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
struct nfsd4_op *op;
struct nfsd4_slot *slot = resp->cstate.slot;
- dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__,
- resp->opcnt, resp->cstate.slot->sl_cachethis);
-
/* Encode the replayed sequence operation */
op = &args->ops[resp->opcnt - 1];
nfsd4_encode_operation(resp, op);
/* Return nfserr_retry_uncached_rep in next operation. */
- if (args->opcnt > 1 && slot->sl_cachethis == 0) {
+ if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
op = &args->ops[resp->opcnt++];
op->status = nfserr_retry_uncached_rep;
nfsd4_encode_operation(resp, op);
@@ -1575,16 +1611,11 @@ check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
else
return nfserr_seq_misordered;
}
- /* Normal */
+ /* Note unsigned 32-bit arithmetic handles wraparound: */
if (likely(seqid == slot_seqid + 1))
return nfs_ok;
- /* Replay */
if (seqid == slot_seqid)
return nfserr_replay_cache;
- /* Wraparound */
- if (seqid == 1 && (slot_seqid + 1) == 0)
- return nfs_ok;
- /* Misordered replay or misordered new request */
return nfserr_seq_misordered;
}
@@ -1815,9 +1846,10 @@ nfsd4_destroy_session(struct svc_rqst *r,
nfsd4_probe_callback_sync(ses->se_client);
nfs4_unlock_state();
+ spin_lock(&client_lock);
nfsd4_del_conns(ses);
-
- nfsd4_put_session(ses);
+ nfsd4_put_session_locked(ses);
+ spin_unlock(&client_lock);
status = nfs_ok;
out:
dprintk("%s returns %d\n", __func__, ntohl(status));
@@ -1921,8 +1953,12 @@ nfsd4_sequence(struct svc_rqst *rqstp,
* sr_highest_slotid and the sr_target_slot id to maxslots */
seq->maxslots = session->se_fchannel.maxreqs;
- status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_inuse);
+ status = check_slot_seqid(seq->seqid, slot->sl_seqid,
+ slot->sl_flags & NFSD4_SLOT_INUSE);
if (status == nfserr_replay_cache) {
+ status = nfserr_seq_misordered;
+ if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
+ goto out;
cstate->slot = slot;
cstate->session = session;
/* Return the cached reply status and set cstate->status
@@ -1938,9 +1974,12 @@ nfsd4_sequence(struct svc_rqst *rqstp,
conn = NULL;
/* Success! bump slot seqid */
- slot->sl_inuse = true;
slot->sl_seqid = seq->seqid;
- slot->sl_cachethis = seq->cachethis;
+ slot->sl_flags |= NFSD4_SLOT_INUSE;
+ if (seq->cachethis)
+ slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
+ else
+ slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
cstate->slot = slot;
cstate->session = session;
@@ -2030,7 +2069,8 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
nfs4_lock_state();
status = nfserr_complete_already;
- if (cstate->session->se_client->cl_firststate)
+ if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
+ &cstate->session->se_client->cl_flags))
goto out;
status = nfserr_stale_clientid;
@@ -2045,7 +2085,7 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
goto out;
status = nfs_ok;
- nfsd4_create_clid_dir(cstate->session->se_client);
+ nfsd4_client_record_create(cstate->session->se_client);
out:
nfs4_unlock_state();
return status;
@@ -2240,7 +2280,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
conf = find_confirmed_client_by_str(unconf->cl_recdir,
hash);
if (conf) {
- nfsd4_remove_clid_dir(conf);
+ nfsd4_client_record_remove(conf);
expire_client(conf);
}
move_to_confirmed(unconf);
@@ -2633,8 +2673,6 @@ nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
static int share_access_to_flags(u32 share_access)
{
- share_access &= ~NFS4_SHARE_WANT_MASK;
-
return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}
@@ -2776,10 +2814,9 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
static void
-nfs4_set_claim_prev(struct nfsd4_open *open)
+nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
{
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
- open->op_openowner->oo_owner.so_client->cl_firststate = 1;
}
/* Should we give out recallable state?: */
@@ -2855,6 +2892,27 @@ static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
return 0;
}
+static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
+{
+ open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
+ if (status == -EAGAIN)
+ open->op_why_no_deleg = WND4_CONTENTION;
+ else {
+ open->op_why_no_deleg = WND4_RESOURCE;
+ switch (open->op_deleg_want) {
+ case NFS4_SHARE_WANT_READ_DELEG:
+ case NFS4_SHARE_WANT_WRITE_DELEG:
+ case NFS4_SHARE_WANT_ANY_DELEG:
+ break;
+ case NFS4_SHARE_WANT_CANCEL:
+ open->op_why_no_deleg = WND4_CANCELLED;
+ break;
+ case NFS4_SHARE_WANT_NO_DELEG:
+ BUG(); /* not supposed to get here */
+ }
+ }
+}
+
/*
* Attempt to hand out a delegation.
*/
@@ -2864,7 +2922,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_
struct nfs4_delegation *dp;
struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
int cb_up;
- int status, flag = 0;
+ int status = 0, flag = 0;
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
flag = NFS4_OPEN_DELEGATE_NONE;
@@ -2905,11 +2963,16 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_
dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
STATEID_VAL(&dp->dl_stid.sc_stateid));
out:
- if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS
- && flag == NFS4_OPEN_DELEGATE_NONE
- && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
- dprintk("NFSD: WARNING: refusing delegation reclaim\n");
open->op_delegate_type = flag;
+ if (flag == NFS4_OPEN_DELEGATE_NONE) {
+ if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
+ open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
+ dprintk("NFSD: WARNING: refusing delegation reclaim\n");
+
+ /* 4.1 client asking for a delegation? */
+ if (open->op_deleg_want)
+ nfsd4_open_deleg_none_ext(open, status);
+ }
return;
out_free:
nfs4_put_delegation(dp);
@@ -2918,6 +2981,24 @@ out_no_deleg:
goto out;
}
+static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
+ struct nfs4_delegation *dp)
+{
+ if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
+ dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
+ open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
+ open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
+ } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
+ dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
+ open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
+ open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
+ }
+ /* Otherwise the client must be confused wanting a delegation
+ * it already has, therefore we don't return
+ * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
+ */
+}
+
/*
* called with nfs4_lock_state() held.
*/
@@ -2979,24 +3060,36 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
- if (nfsd4_has_session(&resp->cstate))
+ if (nfsd4_has_session(&resp->cstate)) {
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
+ if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
+ open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
+ open->op_why_no_deleg = WND4_NOT_WANTED;
+ goto nodeleg;
+ }
+ }
+
/*
* Attempt to hand out a delegation. No error return, because the
* OPEN succeeds even if we fail.
*/
nfs4_open_delegation(current_fh, open, stp);
-
+nodeleg:
status = nfs_ok;
dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
STATEID_VAL(&stp->st_stid.sc_stateid));
out:
+ /* 4.1 client trying to upgrade/downgrade delegation? */
+ if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
+ open->op_deleg_want)
+ nfsd4_deleg_xgrade_none_ext(open, dp);
+
if (fp)
put_nfs4_file(fp);
if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
- nfs4_set_claim_prev(open);
+ nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
/*
* To finish the open response, we just need to set the rflags.
*/
@@ -3066,7 +3159,7 @@ static void
nfsd4_end_grace(void)
{
dprintk("NFSD: end of grace period\n");
- nfsd4_recdir_purge_old();
+ nfsd4_record_grace_done(&init_net, boot_time);
locks_end_grace(&nfsd4_manager);
/*
* Now that every NFSv4 client has had the chance to recover and
@@ -3115,7 +3208,7 @@ nfs4_laundromat(void)
clp = list_entry(pos, struct nfs4_client, cl_lru);
dprintk("NFSD: purging unused client (clientid %08x)\n",
clp->cl_clientid.cl_id);
- nfsd4_remove_clid_dir(clp);
+ nfsd4_client_record_remove(clp);
expire_client(clp);
}
spin_lock(&recall_lock);
@@ -3400,7 +3493,14 @@ __be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_test_stateid *test_stateid)
{
- /* real work is done during encoding */
+ struct nfsd4_test_stateid_id *stateid;
+ struct nfs4_client *cl = cstate->session->se_client;
+
+ nfs4_lock_state();
+ list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
+ stateid->ts_id_status = nfs4_validate_stateid(cl, &stateid->ts_id_stateid);
+ nfs4_unlock_state();
+
return nfs_ok;
}
@@ -3539,7 +3639,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
- nfsd4_create_clid_dir(oo->oo_owner.so_client);
+ nfsd4_client_record_create(oo->oo_owner.so_client);
status = nfs_ok;
out:
if (!cstate->replay_owner)
@@ -3596,7 +3696,9 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
cstate->current_fh.fh_dentry->d_name.name);
/* We don't yet support WANT bits: */
- od->od_share_access &= NFS4_SHARE_ACCESS_MASK;
+ if (od->od_deleg_want)
+ dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
+ od->od_deleg_want);
nfs4_lock_state();
status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
@@ -4353,7 +4455,9 @@ nfs4_has_reclaimed_state(const char *name, bool use_exchange_id)
struct nfs4_client *clp;
clp = find_confirmed_client_by_str(name, strhashval);
- return clp ? 1 : 0;
+ if (!clp)
+ return 0;
+ return test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
}
/*
@@ -4377,7 +4481,7 @@ nfs4_client_to_reclaim(const char *name)
return 1;
}
-static void
+void
nfs4_release_reclaim(void)
{
struct nfs4_client_reclaim *crp = NULL;
@@ -4397,19 +4501,12 @@ nfs4_release_reclaim(void)
/*
* called from OPEN, CLAIM_PREVIOUS with a new clientid. */
-static struct nfs4_client_reclaim *
-nfs4_find_reclaim_client(clientid_t *clid)
+struct nfs4_client_reclaim *
+nfsd4_find_reclaim_client(struct nfs4_client *clp)
{
unsigned int strhashval;
- struct nfs4_client *clp;
struct nfs4_client_reclaim *crp = NULL;
-
- /* find clientid in conf_id_hashtbl */
- clp = find_confirmed_client(clid);
- if (clp == NULL)
- return NULL;
-
dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n",
clp->cl_name.len, clp->cl_name.data,
clp->cl_recdir);
@@ -4430,7 +4527,14 @@ nfs4_find_reclaim_client(clientid_t *clid)
__be32
nfs4_check_open_reclaim(clientid_t *clid)
{
- return nfs4_find_reclaim_client(clid) ? nfs_ok : nfserr_reclaim_bad;
+ struct nfs4_client *clp;
+
+ /* find clientid in conf_id_hashtbl */
+ clp = find_confirmed_client(clid);
+ if (clp == NULL)
+ return nfserr_reclaim_bad;
+
+ return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok;
}
#ifdef CONFIG_NFSD_FAULT_INJECTION
@@ -4442,7 +4546,7 @@ void nfsd_forget_clients(u64 num)
nfs4_lock_state();
list_for_each_entry_safe(clp, next, &client_lru, cl_lru) {
- nfsd4_remove_clid_dir(clp);
+ nfsd4_client_record_remove(clp);
expire_client(clp);
if (++count == num)
break;
@@ -4577,19 +4681,6 @@ nfs4_state_init(void)
reclaim_str_hashtbl_size = 0;
}
-static void
-nfsd4_load_reboot_recovery_data(void)
-{
- int status;
-
- nfs4_lock_state();
- nfsd4_init_recdir();
- status = nfsd4_recdir_load();
- nfs4_unlock_state();
- if (status)
- printk("NFSD: Failure reading reboot recovery data\n");
-}
-
/*
* Since the lifetime of a delegation isn't limited to that of an open, a
* client may quite reasonably hang on to a delegation as long as it has
@@ -4613,21 +4704,34 @@ set_max_delegations(void)
/* initialization to perform when the nfsd service is started: */
-static int
-__nfs4_state_start(void)
+int
+nfs4_state_start(void)
{
int ret;
+ /*
+ * FIXME: For now, we hang most of the pernet global stuff off of
+ * init_net until nfsd is fully containerized. Eventually, we'll
+ * need to pass a net pointer into this function, take a reference
+ * to that instead and then do most of the rest of this on a per-net
+ * basis.
+ */
+ get_net(&init_net);
+ nfsd4_client_tracking_init(&init_net);
boot_time = get_seconds();
locks_start_grace(&nfsd4_manager);
printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
nfsd4_grace);
ret = set_callback_cred();
- if (ret)
- return -ENOMEM;
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_recovery;
+ }
laundry_wq = create_singlethread_workqueue("nfsd4");
- if (laundry_wq == NULL)
- return -ENOMEM;
+ if (laundry_wq == NULL) {
+ ret = -ENOMEM;
+ goto out_recovery;
+ }
ret = nfsd4_create_callback_queue();
if (ret)
goto out_free_laundry;
@@ -4636,16 +4740,12 @@ __nfs4_state_start(void)
return 0;
out_free_laundry:
destroy_workqueue(laundry_wq);
+out_recovery:
+ nfsd4_client_tracking_exit(&init_net);
+ put_net(&init_net);
return ret;
}
-int
-nfs4_state_start(void)
-{
- nfsd4_load_reboot_recovery_data();
- return __nfs4_state_start();
-}
-
static void
__nfs4_state_shutdown(void)
{
@@ -4676,7 +4776,8 @@ __nfs4_state_shutdown(void)
unhash_delegation(dp);
}
- nfsd4_shutdown_recdir();
+ nfsd4_client_tracking_exit(&init_net);
+ put_net(&init_net);
}
void
@@ -4686,8 +4787,108 @@ nfs4_state_shutdown(void)
destroy_workqueue(laundry_wq);
locks_end_grace(&nfsd4_manager);
nfs4_lock_state();
- nfs4_release_reclaim();
__nfs4_state_shutdown();
nfs4_unlock_state();
nfsd4_destroy_callback_queue();
}
+
+static void
+get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
+{
+ if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
+ memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
+}
+
+static void
+put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
+{
+ if (cstate->minorversion) {
+ memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
+ SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
+ }
+}
+
+void
+clear_current_stateid(struct nfsd4_compound_state *cstate)
+{
+ CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
+}
+
+/*
+ * functions to set current state id
+ */
+void
+nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
+{
+ put_stateid(cstate, &odp->od_stateid);
+}
+
+void
+nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
+{
+ put_stateid(cstate, &open->op_stateid);
+}
+
+void
+nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
+{
+ put_stateid(cstate, &close->cl_stateid);
+}
+
+void
+nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
+{
+ put_stateid(cstate, &lock->lk_resp_stateid);
+}
+
+/*
+ * functions to consume current state id
+ */
+
+void
+nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
+{
+ get_stateid(cstate, &odp->od_stateid);
+}
+
+void
+nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
+{
+ get_stateid(cstate, &drp->dr_stateid);
+}
+
+void
+nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
+{
+ get_stateid(cstate, &fsp->fr_stateid);
+}
+
+void
+nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
+{
+ get_stateid(cstate, &setattr->sa_stateid);
+}
+
+void
+nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
+{
+ get_stateid(cstate, &close->cl_stateid);
+}
+
+void
+nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
+{
+ get_stateid(cstate, &locku->lu_stateid);
+}
+
+void
+nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
+{
+ get_stateid(cstate, &read->rd_stateid);
+}
+
+void
+nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
+{
+ get_stateid(cstate, &write->wr_stateid);
+}
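
The new helpers above implement the NFSv4.1 "current stateid" bookkeeping: each op that produces a stateid records it in the compound state via put_stateid(), and ops that accept the special current-stateid value pick it back up via get_stateid(), gated by a per-compound flag. A minimal userspace model of that flag-and-copy pattern (the struct layout and the is_current_stateid parameter are simplifications, not the kernel types):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Illustrative model only -- not the kernel structures. */
typedef struct { uint64_t opaque[2]; } stateid_t;

#define CURRENT_STATE_ID_FLAG (1 << 0)

struct compound_state {
	stateid_t current_stateid;
	uint32_t  sid_flags;
};

/* Op produced a stateid: remember it as the compound's current stateid. */
static void put_stateid(struct compound_state *cs, const stateid_t *sid)
{
	memcpy(&cs->current_stateid, sid, sizeof(*sid));
	cs->sid_flags |= CURRENT_STATE_ID_FLAG;
}

/* Op consumed the special "current" stateid: substitute the saved one. */
static void get_stateid(struct compound_state *cs, stateid_t *sid,
			int is_current_stateid)
{
	if ((cs->sid_flags & CURRENT_STATE_ID_FLAG) && is_current_stateid)
		memcpy(sid, &cs->current_stateid, sizeof(*sid));
}

int main(void)
{
	struct compound_state cs = { { { 0, 0 } }, 0 };
	stateid_t open_sid = { { 0x1234, 0x5678 } };
	stateid_t read_sid = { { 0, 0 } };	/* "current" placeholder */

	put_stateid(&cs, &open_sid);		/* after OPEN */
	get_stateid(&cs, &read_sid, 1);		/* before READ */
	printf("READ uses stateid %#llx\n",
	       (unsigned long long)read_sid.opaque[0]);
	return 0;
}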
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 0ec5a1b9700e..bcd8904ab1e3 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -133,22 +133,6 @@ xdr_error: \
} \
} while (0)
-static void save_buf(struct nfsd4_compoundargs *argp, struct nfsd4_saved_compoundargs *savep)
-{
- savep->p = argp->p;
- savep->end = argp->end;
- savep->pagelen = argp->pagelen;
- savep->pagelist = argp->pagelist;
-}
-
-static void restore_buf(struct nfsd4_compoundargs *argp, struct nfsd4_saved_compoundargs *savep)
-{
- argp->p = savep->p;
- argp->end = savep->end;
- argp->pagelen = savep->pagelen;
- argp->pagelist = savep->pagelist;
-}
-
static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
{
/* We want more bytes than seem to be available.
@@ -638,14 +622,18 @@ nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup
DECODE_TAIL;
}
-static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *x)
+static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *share_access, u32 *deleg_want, u32 *deleg_when)
{
__be32 *p;
u32 w;
READ_BUF(4);
READ32(w);
- *x = w;
+ *share_access = w & NFS4_SHARE_ACCESS_MASK;
+ *deleg_want = w & NFS4_SHARE_WANT_MASK;
+ if (deleg_when)
+ *deleg_when = w & NFS4_SHARE_WHEN_MASK;
+
switch (w & NFS4_SHARE_ACCESS_MASK) {
case NFS4_SHARE_ACCESS_READ:
case NFS4_SHARE_ACCESS_WRITE:
@@ -673,6 +661,9 @@ static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *x)
w &= ~NFS4_SHARE_WANT_MASK;
if (!w)
return nfs_ok;
+
+ if (!deleg_when) /* open_downgrade */
+ return nfserr_inval;
switch (w) {
case NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL:
case NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED:
@@ -719,6 +710,7 @@ static __be32
nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
{
DECODE_HEAD;
+ u32 dummy;
memset(open->op_bmval, 0, sizeof(open->op_bmval));
open->op_iattr.ia_valid = 0;
@@ -727,7 +719,9 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
/* seqid, share_access, share_deny, clientid, ownerlen */
READ_BUF(4);
READ32(open->op_seqid);
- status = nfsd4_decode_share_access(argp, &open->op_share_access);
+ /* deleg_when is decoded but ignored until it is supported */
+ status = nfsd4_decode_share_access(argp, &open->op_share_access,
+ &open->op_deleg_want, &dummy);
if (status)
goto xdr_error;
status = nfsd4_decode_share_deny(argp, &open->op_share_deny);
@@ -755,14 +749,14 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
goto out;
break;
case NFS4_CREATE_EXCLUSIVE:
- READ_BUF(8);
- COPYMEM(open->op_verf.data, 8);
+ READ_BUF(NFS4_VERIFIER_SIZE);
+ COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
break;
case NFS4_CREATE_EXCLUSIVE4_1:
if (argp->minorversion < 1)
goto xdr_error;
- READ_BUF(8);
- COPYMEM(open->op_verf.data, 8);
+ READ_BUF(NFS4_VERIFIER_SIZE);
+ COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_fattr(argp, open->op_bmval,
&open->op_iattr, &open->op_acl);
if (status)
@@ -848,7 +842,8 @@ nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_d
return status;
READ_BUF(4);
READ32(open_down->od_seqid);
- status = nfsd4_decode_share_access(argp, &open_down->od_share_access);
+ status = nfsd4_decode_share_access(argp, &open_down->od_share_access,
+ &open_down->od_deleg_want, NULL);
if (status)
return status;
status = nfsd4_decode_share_deny(argp, &open_down->od_share_deny);
@@ -994,8 +989,8 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
{
DECODE_HEAD;
- READ_BUF(8);
- COPYMEM(setclientid->se_verf.data, 8);
+ READ_BUF(NFS4_VERIFIER_SIZE);
+ COPYMEM(setclientid->se_verf.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_opaque(argp, &setclientid->se_name);
if (status)
@@ -1020,9 +1015,9 @@ nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_s
{
DECODE_HEAD;
- READ_BUF(8 + sizeof(nfs4_verifier));
+ READ_BUF(8 + NFS4_VERIFIER_SIZE);
COPYMEM(&scd_c->sc_clientid, 8);
- COPYMEM(&scd_c->sc_confirm, sizeof(nfs4_verifier));
+ COPYMEM(&scd_c->sc_confirm, NFS4_VERIFIER_SIZE);
DECODE_TAIL;
}
@@ -1385,26 +1380,29 @@ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
static __be32
nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid)
{
- unsigned int nbytes;
- stateid_t si;
int i;
- __be32 *p;
- __be32 status;
+ __be32 *p, status;
+ struct nfsd4_test_stateid_id *stateid;
READ_BUF(4);
test_stateid->ts_num_ids = ntohl(*p++);
- nbytes = test_stateid->ts_num_ids * sizeof(stateid_t);
- if (nbytes > (u32)((char *)argp->end - (char *)argp->p))
- goto xdr_error;
-
- test_stateid->ts_saved_args = argp;
- save_buf(argp, &test_stateid->ts_savedp);
+ INIT_LIST_HEAD(&test_stateid->ts_stateid_list);
for (i = 0; i < test_stateid->ts_num_ids; i++) {
- status = nfsd4_decode_stateid(argp, &si);
+ stateid = kmalloc(sizeof(struct nfsd4_test_stateid_id), GFP_KERNEL);
+ if (!stateid) {
+ status = PTR_ERR(stateid);
+ goto out;
+ }
+
+ defer_free(argp, kfree, stateid);
+ INIT_LIST_HEAD(&stateid->ts_id_list);
+ list_add_tail(&stateid->ts_id_list, &test_stateid->ts_stateid_list);
+
+ status = nfsd4_decode_stateid(argp, &stateid->ts_id_stateid);
if (status)
- return status;
+ goto out;
}
status = 0;
@@ -2661,8 +2659,8 @@ nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
__be32 *p;
if (!nfserr) {
- RESERVE_SPACE(8);
- WRITEMEM(commit->co_verf.data, 8);
+ RESERVE_SPACE(NFS4_VERIFIER_SIZE);
+ WRITEMEM(commit->co_verf.data, NFS4_VERIFIER_SIZE);
ADJUST_ARGS();
}
return nfserr;
@@ -2851,6 +2849,20 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
WRITE32(0); /* XXX: is NULL principal ok? */
ADJUST_ARGS();
break;
+ case NFS4_OPEN_DELEGATE_NONE_EXT: /* 4.1 */
+ switch (open->op_why_no_deleg) {
+ case WND4_CONTENTION:
+ case WND4_RESOURCE:
+ RESERVE_SPACE(8);
+ WRITE32(open->op_why_no_deleg);
+ WRITE32(0); /* deleg signaling not supported yet */
+ break;
+ default:
+ RESERVE_SPACE(4);
+ WRITE32(open->op_why_no_deleg);
+ }
+ ADJUST_ARGS();
+ break;
default:
BUG();
}
@@ -3008,7 +3020,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
if (resp->xbuf->page_len)
return nfserr_resource;
- RESERVE_SPACE(8); /* verifier */
+ RESERVE_SPACE(NFS4_VERIFIER_SIZE);
savep = p;
/* XXX: Following NFSv3, we ignore the READDIR verifier for now. */
@@ -3209,9 +3221,9 @@ nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct n
__be32 *p;
if (!nfserr) {
- RESERVE_SPACE(8 + sizeof(nfs4_verifier));
+ RESERVE_SPACE(8 + NFS4_VERIFIER_SIZE);
WRITEMEM(&scd->se_clientid, 8);
- WRITEMEM(&scd->se_confirm, sizeof(nfs4_verifier));
+ WRITEMEM(&scd->se_confirm, NFS4_VERIFIER_SIZE);
ADJUST_ARGS();
}
else if (nfserr == nfserr_clid_inuse) {
@@ -3232,7 +3244,7 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_w
RESERVE_SPACE(16);
WRITE32(write->wr_bytes_written);
WRITE32(write->wr_how_written);
- WRITEMEM(write->wr_verifier.data, 8);
+ WRITEMEM(write->wr_verifier.data, NFS4_VERIFIER_SIZE);
ADJUST_ARGS();
}
return nfserr;
@@ -3391,30 +3403,17 @@ __be32
nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_test_stateid *test_stateid)
{
- struct nfsd4_compoundargs *argp;
- struct nfs4_client *cl = resp->cstate.session->se_client;
- stateid_t si;
+ struct nfsd4_test_stateid_id *stateid, *next;
__be32 *p;
- int i;
- int valid;
-
- restore_buf(test_stateid->ts_saved_args, &test_stateid->ts_savedp);
- argp = test_stateid->ts_saved_args;
- RESERVE_SPACE(4);
+ RESERVE_SPACE(4 + (4 * test_stateid->ts_num_ids));
*p++ = htonl(test_stateid->ts_num_ids);
- resp->p = p;
- nfs4_lock_state();
- for (i = 0; i < test_stateid->ts_num_ids; i++) {
- nfsd4_decode_stateid(argp, &si);
- valid = nfs4_validate_stateid(cl, &si);
- RESERVE_SPACE(4);
- *p++ = htonl(valid);
- resp->p = p;
+ list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) {
+ *p++ = htonl(stateid->ts_id_status);
}
- nfs4_unlock_state();
+ ADJUST_ARGS();
return nfserr;
}
@@ -3532,7 +3531,7 @@ int nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
if (length > session->se_fchannel.maxresp_sz)
return nfserr_rep_too_big;
- if (slot->sl_cachethis == 1 &&
+ if ((slot->sl_flags & NFSD4_SLOT_CACHETHIS) &&
length > session->se_fchannel.maxresp_cached)
return nfserr_rep_too_big_to_cache;
@@ -3656,8 +3655,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo
if (nfsd4_has_session(cs)) {
if (cs->status != nfserr_replay_cache) {
nfsd4_store_cache_entry(resp);
- dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__);
- cs->slot->sl_inuse = false;
+ cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
}
/* Renew the clientid on success and on replay */
release_session_client(cs->session);
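
The TEST_STATEID rework above stops saving and re-parsing the argument buffer at encode time; decode now allocates one nfsd4_test_stateid_id per stateid and queues it on a list, and the encoder simply walks that list emitting each ts_id_status. A rough standalone model of that decode-into-list / encode-from-list split (plain singly linked list and invented field names, not the kernel structures):

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>

struct test_id {
	uint32_t	status;
	uint32_t	stateid;	/* stands in for a full stateid_t */
	struct test_id	*next;
};

struct test_stateid {
	uint32_t	num_ids;
	struct test_id	*head, *tail;
};

/* "Decode": pull ids out of a buffer and queue them for later processing. */
static int decode_ids(struct test_stateid *ts, const uint32_t *buf, uint32_t n)
{
	for (uint32_t i = 0; i < n; i++) {
		struct test_id *id = calloc(1, sizeof(*id));
		if (!id)
			return -1;
		id->stateid = buf[i];
		if (ts->tail)
			ts->tail->next = id;
		else
			ts->head = id;
		ts->tail = id;
		ts->num_ids++;
	}
	return 0;
}

/* "Encode": walk the list and emit one status word per id. */
static void encode_statuses(const struct test_stateid *ts)
{
	printf("%u ids\n", ts->num_ids);
	for (struct test_id *id = ts->head; id; id = id->next)
		printf("stateid %#x -> status %u\n", id->stateid, id->status);
}

int main(void)
{
	struct test_stateid ts = { 0, NULL, NULL };
	uint32_t wire[] = { 0xaaaa, 0xbbbb, 0xcccc };

	if (decode_ids(&ts, wire, 3))
		return 1;
	ts.head->status = 0;	/* validation happens elsewhere in the kernel */
	encode_statuses(&ts);
	return 0;
}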
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 64c24af8d7ea..2c53be6d3579 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -13,12 +13,14 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/gss_krb5_enctypes.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/module.h>
#include "idmap.h"
#include "nfsd.h"
#include "cache.h"
#include "fault_inject.h"
+#include "netns.h"
/*
* We have a single directory with several nodes in it.
@@ -1124,14 +1126,26 @@ static int create_proc_exports_entry(void)
}
#endif
+int nfsd_net_id;
+static struct pernet_operations nfsd_net_ops = {
+ .id = &nfsd_net_id,
+ .size = sizeof(struct nfsd_net),
+};
+
static int __init init_nfsd(void)
{
int retval;
printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
- retval = nfsd4_init_slabs();
+ retval = register_cld_notifier();
if (retval)
return retval;
+ retval = register_pernet_subsys(&nfsd_net_ops);
+ if (retval < 0)
+ goto out_unregister_notifier;
+ retval = nfsd4_init_slabs();
+ if (retval)
+ goto out_unregister_pernet;
nfs4_state_init();
retval = nfsd_fault_inject_init(); /* nfsd fault injection controls */
if (retval)
@@ -1169,6 +1183,10 @@ out_free_stat:
nfsd_fault_inject_cleanup();
out_free_slabs:
nfsd4_free_slabs();
+out_unregister_pernet:
+ unregister_pernet_subsys(&nfsd_net_ops);
+out_unregister_notifier:
+ unregister_cld_notifier();
return retval;
}
@@ -1184,6 +1202,8 @@ static void __exit exit_nfsd(void)
nfsd4_free_slabs();
nfsd_fault_inject_cleanup();
unregister_filesystem(&nfsd_fs_type);
+ unregister_pernet_subsys(&nfsd_net_ops);
+ unregister_cld_notifier();
}
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
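
init_nfsd() now registers a pernet subsystem, so the core network-namespace code allocates a struct nfsd_net of the given size for every namespace and hands back an id used to look it up later (net_generic() in the kernel). A small userspace model of that register-by-{id,size} pattern (all names here are stand-ins, not the kernel API):

#include <stdlib.h>
#include <stdio.h>

struct net_model {
	void *gen[8];			/* per-subsystem private data slots */
};

struct pernet_ops_model {
	int    *id;
	size_t  size;
};

static int next_id;

static int register_pernet_model(struct net_model *net,
				 struct pernet_ops_model *ops)
{
	*ops->id = next_id++;
	net->gen[*ops->id] = calloc(1, ops->size);
	return net->gen[*ops->id] ? 0 : -1;
}

struct nfsd_net_model {			/* stand-in for struct nfsd_net */
	unsigned int clients;
};

static int nfsd_net_id_model;
static struct pernet_ops_model nfsd_ops_model = {
	.id   = &nfsd_net_id_model,
	.size = sizeof(struct nfsd_net_model),
};

int main(void)
{
	struct net_model init_net_model = { { 0 } };

	if (register_pernet_model(&init_net_model, &nfsd_ops_model))
		return 1;
	struct nfsd_net_model *nn = init_net_model.gen[nfsd_net_id_model];
	nn->clients = 1;
	printf("nfsd_net id %d, %u client(s)\n", nfsd_net_id_model, nn->clients);
	return 0;
}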
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 1d1e8589b4ce..1671429ffa66 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -364,12 +364,17 @@ static inline u32 nfsd_suppattrs2(u32 minorversion)
NFSD_WRITEABLE_ATTRS_WORD2
extern int nfsd4_is_junction(struct dentry *dentry);
-#else
+extern int register_cld_notifier(void);
+extern void unregister_cld_notifier(void);
+#else /* CONFIG_NFSD_V4 */
static inline int nfsd4_is_junction(struct dentry *dentry)
{
return 0;
}
+#define register_cld_notifier() 0
+#define unregister_cld_notifier() do { } while(0)
+
#endif /* CONFIG_NFSD_V4 */
#endif /* LINUX_NFSD_NFSD_H */
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index fce472f5f39e..28dfad39f0c5 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -307,33 +307,37 @@ static void set_max_drc(void)
dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem);
}
-int nfsd_create_serv(void)
+static int nfsd_get_default_max_blksize(void)
{
- int err = 0;
+ struct sysinfo i;
+ unsigned long long target;
+ unsigned long ret;
+
+ si_meminfo(&i);
+ target = (i.totalram - i.totalhigh) << PAGE_SHIFT;
+ /*
+ * Aim for 1/4096 of memory per thread. This gives 1MB on 4Gig
+ * machines, but only uses 32K on 128M machines. Bottom out at
+ * 8K on 32M and smaller. Of course, this is only a default.
+ */
+ target >>= 12;
+
+ ret = NFSSVC_MAXBLKSIZE;
+ while (ret > target && ret >= 8*1024*2)
+ ret /= 2;
+ return ret;
+}
+int nfsd_create_serv(void)
+{
WARN_ON(!mutex_is_locked(&nfsd_mutex));
if (nfsd_serv) {
svc_get(nfsd_serv);
return 0;
}
- if (nfsd_max_blksize == 0) {
- /* choose a suitable default */
- struct sysinfo i;
- si_meminfo(&i);
- /* Aim for 1/4096 of memory per thread
- * This gives 1MB on 4Gig machines
- * But only uses 32K on 128M machines.
- * Bottom out at 8K on 32M and smaller.
- * Of course, this is only a default.
- */
- nfsd_max_blksize = NFSSVC_MAXBLKSIZE;
- i.totalram <<= PAGE_SHIFT - 12;
- while (nfsd_max_blksize > i.totalram &&
- nfsd_max_blksize >= 8*1024*2)
- nfsd_max_blksize /= 2;
- }
+ if (nfsd_max_blksize == 0)
+ nfsd_max_blksize = nfsd_get_default_max_blksize();
nfsd_reset_versions();
-
nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
nfsd_last_thread, nfsd, THIS_MODULE);
if (nfsd_serv == NULL)
@@ -341,7 +345,7 @@ int nfsd_create_serv(void)
set_max_drc();
do_gettimeofday(&nfssvc_boot); /* record boot time */
- return err;
+ return 0;
}
int nfsd_nrpools(void)
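
The block-size heuristic split out above keeps its behaviour: take 1/4096 of low memory as the per-thread target and halve the maximum block size until it fits, never going below 8 KB. A quick standalone check of that arithmetic, assuming NFSSVC_MAXBLKSIZE is 1 MB (the usual RPC maximum payload):

#include <stdio.h>

#define MODEL_MAXBLKSIZE (1024 * 1024)	/* assumed NFSSVC_MAXBLKSIZE */

static unsigned long default_max_blksize(unsigned long long lowmem_bytes)
{
	unsigned long long target = lowmem_bytes >> 12;	/* 1/4096 of memory */
	unsigned long ret = MODEL_MAXBLKSIZE;

	while (ret > target && ret >= 8 * 1024 * 2)
		ret /= 2;
	return ret;
}

int main(void)
{
	unsigned long long sizes[] = {
		32ULL << 20, 128ULL << 20, 4ULL << 30	/* 32 MB, 128 MB, 4 GB */
	};

	for (int i = 0; i < 3; i++)
		printf("%6llu MB lowmem -> %4lu KB blocks\n",
		       sizes[i] >> 20, default_max_blksize(sizes[i]) >> 10);
	return 0;
}

Running the model reproduces the numbers in the comment: 8 KB at 32 MB, 32 KB at 128 MB, and the full 1 MB once low memory reaches 4 GB.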
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index ffb5df1db94f..89ab137d379a 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -128,12 +128,14 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
(NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE)
struct nfsd4_slot {
- bool sl_inuse;
- bool sl_cachethis;
- u16 sl_opcnt;
u32 sl_seqid;
__be32 sl_status;
u32 sl_datalen;
+ u16 sl_opcnt;
+#define NFSD4_SLOT_INUSE (1 << 0)
+#define NFSD4_SLOT_CACHETHIS (1 << 1)
+#define NFSD4_SLOT_INITIALIZED (1 << 2)
+ u8 sl_flags;
char sl_data[];
};
@@ -196,18 +198,7 @@ struct nfsd4_session {
struct nfsd4_slot *se_slots[]; /* forward channel slots */
};
-static inline void
-nfsd4_put_session(struct nfsd4_session *ses)
-{
- extern void free_session(struct kref *kref);
- kref_put(&ses->se_ref, free_session);
-}
-
-static inline void
-nfsd4_get_session(struct nfsd4_session *ses)
-{
- kref_get(&ses->se_ref);
-}
+extern void nfsd4_put_session(struct nfsd4_session *ses);
/* formatted contents of nfs4_sessionid */
struct nfsd4_sessionid {
@@ -245,14 +236,17 @@ struct nfs4_client {
struct svc_cred cl_cred; /* setclientid principal */
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
- u32 cl_firststate; /* recovery dir creation */
u32 cl_minorversion;
/* for v4.0 and v4.1 callbacks: */
struct nfs4_cb_conn cl_cb_conn;
-#define NFSD4_CLIENT_CB_UPDATE 1
-#define NFSD4_CLIENT_KILL 2
- unsigned long cl_cb_flags;
+#define NFSD4_CLIENT_CB_UPDATE (0)
+#define NFSD4_CLIENT_CB_KILL (1)
+#define NFSD4_CLIENT_STABLE (2) /* client on stable storage */
+#define NFSD4_CLIENT_RECLAIM_COMPLETE (3) /* reclaim_complete done */
+#define NFSD4_CLIENT_CB_FLAG_MASK (1 << NFSD4_CLIENT_CB_UPDATE | \
+ 1 << NFSD4_CLIENT_CB_KILL)
+ unsigned long cl_flags;
struct rpc_clnt *cl_cb_client;
u32 cl_cb_ident;
#define NFSD4_CB_UP 0
@@ -463,6 +457,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
extern void nfs4_lock_state(void);
extern void nfs4_unlock_state(void);
extern int nfs4_in_grace(void);
+extern void nfs4_release_reclaim(void);
+extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(struct nfs4_client *crp);
extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
extern void nfs4_free_openowner(struct nfs4_openowner *);
extern void nfs4_free_lockowner(struct nfs4_lockowner *);
@@ -477,16 +473,17 @@ extern void nfsd4_destroy_callback_queue(void);
extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfs4_put_delegation(struct nfs4_delegation *dp);
extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
-extern void nfsd4_init_recdir(void);
-extern int nfsd4_recdir_load(void);
-extern void nfsd4_shutdown_recdir(void);
extern int nfs4_client_to_reclaim(const char *name);
extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id);
-extern void nfsd4_recdir_purge_old(void);
-extern void nfsd4_create_clid_dir(struct nfs4_client *clp);
-extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
extern void release_session_client(struct nfsd4_session *);
extern __be32 nfs4_validate_stateid(struct nfs4_client *, stateid_t *);
extern void nfsd4_purge_closed_stateid(struct nfs4_stateowner *);
+/* nfs4recover operations */
+extern int nfsd4_client_tracking_init(struct net *net);
+extern void nfsd4_client_tracking_exit(struct net *net);
+extern void nfsd4_client_record_create(struct nfs4_client *clp);
+extern void nfsd4_client_record_remove(struct nfs4_client *clp);
+extern int nfsd4_client_record_check(struct nfs4_client *clp);
+extern void nfsd4_record_grace_done(struct net *net, time_t boot_time);
#endif /* NFSD4_STATE_H */
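
The state.h hunk replaces the slot's separate sl_inuse/sl_cachethis booleans with a single sl_flags byte, which is what the earlier nfs4state.c and nfs4xdr.c hunks test and clear. A small model of that bools-to-flags packing (field names shortened, values illustrative):

#include <stdio.h>
#include <stdint.h>

#define SLOT_INUSE		(1 << 0)
#define SLOT_CACHETHIS		(1 << 1)
#define SLOT_INITIALIZED	(1 << 2)

struct slot {
	uint32_t seqid;
	uint8_t  flags;		/* replaces bool sl_inuse, bool sl_cachethis */
};

int main(void)
{
	struct slot s = { .seqid = 1, .flags = 0 };

	s.flags |= SLOT_INUSE | SLOT_CACHETHIS;		/* claim + cache */
	if (s.flags & SLOT_CACHETHIS)
		printf("slot %u: response will be cached\n", (unsigned)s.seqid);
	s.flags &= ~SLOT_INUSE;				/* release */
	printf("slot %u: flags now %#x\n", (unsigned)s.seqid, (unsigned)s.flags);
	return 0;
}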
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index e59f71d0cf73..296d671654d6 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -737,12 +737,13 @@ static int nfsd_open_break_lease(struct inode *inode, int access)
/*
* Open an existing file or directory.
- * The access argument indicates the type of open (read/write/lock)
+ * The may_flags argument indicates the type of open (read/write/lock)
+ * and additional flags.
* N.B. After this call fhp needs an fh_put
*/
__be32
nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
- int access, struct file **filp)
+ int may_flags, struct file **filp)
{
struct dentry *dentry;
struct inode *inode;
@@ -757,7 +758,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
* and (hopefully) checked permission - so allow OWNER_OVERRIDE
* in case a chmod has now revoked permission.
*/
- err = fh_verify(rqstp, fhp, type, access | NFSD_MAY_OWNER_OVERRIDE);
+ err = fh_verify(rqstp, fhp, type, may_flags | NFSD_MAY_OWNER_OVERRIDE);
if (err)
goto out;
@@ -768,7 +769,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
* or any access when mandatory locking enabled
*/
err = nfserr_perm;
- if (IS_APPEND(inode) && (access & NFSD_MAY_WRITE))
+ if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
goto out;
/*
* We must ignore files (but only files) which might have mandatory
@@ -781,12 +782,12 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
if (!inode->i_fop)
goto out;
- host_err = nfsd_open_break_lease(inode, access);
+ host_err = nfsd_open_break_lease(inode, may_flags);
if (host_err) /* NOMEM or WOULDBLOCK */
goto out_nfserr;
- if (access & NFSD_MAY_WRITE) {
- if (access & NFSD_MAY_READ)
+ if (may_flags & NFSD_MAY_WRITE) {
+ if (may_flags & NFSD_MAY_READ)
flags = O_RDWR|O_LARGEFILE;
else
flags = O_WRONLY|O_LARGEFILE;
@@ -795,8 +796,15 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
flags, current_cred());
if (IS_ERR(*filp))
host_err = PTR_ERR(*filp);
- else
- host_err = ima_file_check(*filp, access);
+ else {
+ host_err = ima_file_check(*filp, may_flags);
+
+ if (may_flags & NFSD_MAY_64BIT_COOKIE)
+ (*filp)->f_mode |= FMODE_64BITHASH;
+ else
+ (*filp)->f_mode |= FMODE_32BITHASH;
+ }
+
out_nfserr:
err = nfserrno(host_err);
out:
@@ -2021,8 +2029,13 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
__be32 err;
struct file *file;
loff_t offset = *offsetp;
+ int may_flags = NFSD_MAY_READ;
+
+ /* NFSv2 only supports 32 bit cookies */
+ if (rqstp->rq_vers > 2)
+ may_flags |= NFSD_MAY_64BIT_COOKIE;
- err = nfsd_open(rqstp, fhp, S_IFDIR, NFSD_MAY_READ, &file);
+ err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
if (err)
goto out;
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 1dcd238e11a0..ec0611b2b738 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -27,6 +27,8 @@
#define NFSD_MAY_BYPASS_GSS 0x400
#define NFSD_MAY_READ_IF_EXEC 0x800
+#define NFSD_MAY_64BIT_COOKIE 0x1000 /* 64 bit readdir cookies for >= NFSv3 */
+
#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
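
The vfs.c and vfs.h changes make readdir cookie width explicit: NFSv3 and later request NFSD_MAY_64BIT_COOKIE, and nfsd_open() translates that into FMODE_64BITHASH (or FMODE_32BITHASH for NFSv2) on the opened directory. A compact model of that decision (the flag values mirror the kernel definitions of the time but are used here only for illustration):

#include <stdio.h>

#define MAY_READ		0x0001
#define MAY_64BIT_COOKIE	0x1000

#define FMODE_32BITHASH		0x0200
#define FMODE_64BITHASH		0x0400

static unsigned int readdir_may_flags(int rq_vers)
{
	unsigned int may_flags = MAY_READ;

	if (rq_vers > 2)			/* NFSv3 and NFSv4 */
		may_flags |= MAY_64BIT_COOKIE;
	return may_flags;
}

static unsigned int open_f_mode(unsigned int may_flags)
{
	return (may_flags & MAY_64BIT_COOKIE) ? FMODE_64BITHASH
					      : FMODE_32BITHASH;
}

int main(void)
{
	for (int vers = 2; vers <= 4; vers++)
		printf("NFSv%d -> f_mode |= %#x\n",
		       vers, open_f_mode(readdir_may_flags(vers)));
	return 0;
}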
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 2364747ee97d..1b3501598ab5 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -43,6 +43,13 @@
#define NFSD4_MAX_TAGLEN 128
#define XDR_LEN(n) (((n) + 3) & ~3)
+#define CURRENT_STATE_ID_FLAG (1<<0)
+#define SAVED_STATE_ID_FLAG (1<<1)
+
+#define SET_STATE_ID(c, f) ((c)->sid_flags |= (f))
+#define HAS_STATE_ID(c, f) ((c)->sid_flags & (f))
+#define CLEAR_STATE_ID(c, f) ((c)->sid_flags &= ~(f))
+
struct nfsd4_compound_state {
struct svc_fh current_fh;
struct svc_fh save_fh;
@@ -54,6 +61,10 @@ struct nfsd4_compound_state {
size_t iovlen;
u32 minorversion;
u32 status;
+ stateid_t current_stateid;
+ stateid_t save_stateid;
+ /* to indicate whether the current and saved stateids are present */
+ u32 sid_flags;
};
static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs)
@@ -212,16 +223,19 @@ struct nfsd4_open {
struct xdr_netobj op_fname; /* request - everything but CLAIM_PREV */
u32 op_delegate_type; /* request - CLAIM_PREV only */
stateid_t op_delegate_stateid; /* request - response */
+ u32 op_why_no_deleg; /* response - DELEG_NONE_EXT only */
u32 op_create; /* request */
u32 op_createmode; /* request */
u32 op_bmval[3]; /* request */
struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
- nfs4_verifier verf; /* EXCLUSIVE4 */
+ nfs4_verifier op_verf __attribute__((aligned(32)));
+ /* EXCLUSIVE4 */
clientid_t op_clientid; /* request */
struct xdr_netobj op_owner; /* request */
u32 op_seqid; /* request */
u32 op_share_access; /* request */
u32 op_share_deny; /* request */
+ u32 op_deleg_want; /* request */
stateid_t op_stateid; /* response */
u32 op_recall; /* recall */
struct nfsd4_change_info op_cinfo; /* response */
@@ -234,7 +248,6 @@ struct nfsd4_open {
struct nfs4_acl *op_acl;
};
#define op_iattr iattr
-#define op_verf verf
struct nfsd4_open_confirm {
stateid_t oc_req_stateid /* request */;
@@ -245,8 +258,9 @@ struct nfsd4_open_confirm {
struct nfsd4_open_downgrade {
stateid_t od_stateid;
u32 od_seqid;
- u32 od_share_access;
- u32 od_share_deny;
+ u32 od_share_access; /* request */
+ u32 od_deleg_want; /* request */
+ u32 od_share_deny; /* request */
};
@@ -343,10 +357,15 @@ struct nfsd4_saved_compoundargs {
struct page **pagelist;
};
+struct nfsd4_test_stateid_id {
+ __be32 ts_id_status;
+ stateid_t ts_id_stateid;
+ struct list_head ts_id_list;
+};
+
struct nfsd4_test_stateid {
__be32 ts_num_ids;
- struct nfsd4_compoundargs *ts_saved_args;
- struct nfsd4_saved_compoundargs ts_savedp;
+ struct list_head ts_stateid_list;
};
struct nfsd4_free_stateid {
@@ -503,7 +522,8 @@ static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
{
- return !resp->cstate.slot->sl_cachethis || nfsd4_is_solo_sequence(resp);
+ return !(resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
+ || nfsd4_is_solo_sequence(resp);
}
#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs)
diff --git a/fs/open.c b/fs/open.c
index 77becc041149..5720854156db 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -836,7 +836,7 @@ EXPORT_SYMBOL(dentry_open);
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = files_fdtable(files);
- __FD_CLR(fd, fdt->open_fds);
+ __clear_open_fd(fd, fdt);
if (fd < files->next_fd)
files->next_fd = fd;
}
@@ -1080,7 +1080,7 @@ SYSCALL_DEFINE1(close, unsigned int, fd)
if (!filp)
goto out_unlock;
rcu_assign_pointer(fdt->fd[fd], NULL);
- FD_CLR(fd, fdt->close_on_exec);
+ __clear_close_on_exec(fd, fdt);
__put_unused_fd(files, fd);
spin_unlock(&files->file_lock);
retval = filp_close(filp, files);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3b42c1418f31..1c8b280146d7 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1753,7 +1753,7 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
fdt = files_fdtable(files);
f_flags = file->f_flags & ~O_CLOEXEC;
- if (FD_ISSET(fd, fdt->close_on_exec))
+ if (close_on_exec(fd, fdt))
f_flags |= O_CLOEXEC;
if (path) {
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index c283832d411d..2b9a7607cbd5 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -783,7 +783,6 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
- spin_lock(&walk->mm->page_table_lock);
if (pmd_trans_huge_lock(pmd, vma) == 1) {
for (; addr != end; addr += PAGE_SIZE) {
unsigned long offset;
diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
index 71e2b4d50a0a..f86f51f99ace 100644
--- a/fs/romfs/storage.c
+++ b/fs/romfs/storage.c
@@ -19,7 +19,7 @@
#endif
#ifdef CONFIG_ROMFS_ON_MTD
-#define ROMFS_MTD_READ(sb, ...) ((sb)->s_mtd->read((sb)->s_mtd, ##__VA_ARGS__))
+#define ROMFS_MTD_READ(sb, ...) mtd_read((sb)->s_mtd, ##__VA_ARGS__)
/*
* read data from a romfs image on an MTD device
diff --git a/fs/select.c b/fs/select.c
index 6fb8943d580b..17d33d09fc16 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -348,7 +348,7 @@ static int max_select_fd(unsigned long n, fd_set_bits *fds)
set = ~(~0UL << (n & (__NFDBITS-1)));
n /= __NFDBITS;
fdt = files_fdtable(current->files);
- open_fds = fdt->open_fds->fds_bits+n;
+ open_fds = fdt->open_fds + n;
max = 0;
if (set) {
set &= BITS(fds, n);
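
The fs/open.c, fs/proc and fs/select.c hunks switch from the __FD_* macros over an fd_set to helpers that operate directly on the fdtable's unsigned-long bitmaps (__clear_open_fd(), __clear_close_on_exec(), close_on_exec()). A userspace model of those bitmap helpers (sizes and names are placeholders, not the fdtable API):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define FD_WORDS	4	/* enough for 4 * BITS_PER_LONG descriptors */

struct fdtable_model {
	unsigned long open_fds[FD_WORDS];
	unsigned long close_on_exec[FD_WORDS];
};

static void set_bit_in(unsigned long *map, unsigned int fd)
{
	map[fd / BITS_PER_LONG] |= 1UL << (fd % BITS_PER_LONG);
}

static void clear_bit_in(unsigned long *map, unsigned int fd)
{
	map[fd / BITS_PER_LONG] &= ~(1UL << (fd % BITS_PER_LONG));
}

static int test_bit_in(const unsigned long *map, unsigned int fd)
{
	return (map[fd / BITS_PER_LONG] >> (fd % BITS_PER_LONG)) & 1;
}

int main(void)
{
	struct fdtable_model fdt = { { 0 }, { 0 } };
	unsigned int fd = 67;

	set_bit_in(fdt.open_fds, fd);
	set_bit_in(fdt.close_on_exec, fd);
	printf("close_on_exec(%u) = %d\n", fd, test_bit_in(fdt.close_on_exec, fd));
	clear_bit_in(fdt.close_on_exec, fd);	/* like __clear_close_on_exec() */
	clear_bit_in(fdt.open_fds, fd);		/* like __clear_open_fd() */
	printf("open(%u) = %d\n", fd, test_bit_in(fdt.open_fds, fd));
	return 0;
}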
diff --git a/drivers/acpi/acpica/acconfig.h b/include/acpi/acconfig.h
index 1f30af613e87..03f14856bd09 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -85,6 +85,23 @@
*/
#define ACPI_CHECKSUM_ABORT FALSE
+/*
+ * Generate a version of ACPICA that only supports "reduced hardware"
+ * platforms (as defined in ACPI 5.0). Set to TRUE to generate a specialized
+ * version of ACPICA that ONLY supports the ACPI 5.0 "reduced hardware"
+ * model. In other words, no ACPI hardware is supported.
+ *
+ * If TRUE, this means no support for the following:
+ * PM Event and Control registers
+ * SCI interrupt (and handler)
+ * Fixed Events
+ * General Purpose Events (GPEs)
+ * Global Lock
+ * ACPI PM timer
+ * FACS table (Waking vectors and Global Lock)
+ */
+#define ACPI_REDUCED_HARDWARE FALSE
+
/******************************************************************************
*
* Subsystem Constants
@@ -93,7 +110,7 @@
/* Version of ACPI supported */
-#define ACPI_CA_SUPPORT_LEVEL 3
+#define ACPI_CA_SUPPORT_LEVEL 5
/* Maximum count for a semaphore object */
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 5b6c391efc8e..92d6e1d701ff 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -57,6 +57,7 @@
#define ACPI_SUCCESS(a) (!(a))
#define ACPI_FAILURE(a) (a)
+#define ACPI_SKIP(a) (a == AE_CTRL_SKIP)
#define AE_OK (acpi_status) 0x0000
/*
@@ -89,8 +90,9 @@
#define AE_SAME_HANDLER (acpi_status) (0x0019 | AE_CODE_ENVIRONMENTAL)
#define AE_NO_HANDLER (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL)
#define AE_OWNER_ID_LIMIT (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL)
+#define AE_NOT_CONFIGURED (acpi_status) (0x001C | AE_CODE_ENVIRONMENTAL)
-#define AE_CODE_ENV_MAX 0x001B
+#define AE_CODE_ENV_MAX 0x001C
/*
* Programmer exceptions
@@ -213,7 +215,8 @@ char const *acpi_gbl_exception_names_env[] = {
"AE_ABORT_METHOD",
"AE_SAME_HANDLER",
"AE_NO_HANDLER",
- "AE_OWNER_ID_LIMIT"
+ "AE_OWNER_ID_LIMIT",
+ "AE_NOT_CONFIGURED"
};
char const *acpi_gbl_exception_names_pgm[] = {
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 5b5af0d30a97..38f508816e4a 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -46,6 +46,7 @@
/* Method names - these methods can appear anywhere in the namespace */
+#define METHOD_NAME__SB_ "_SB_"
#define METHOD_NAME__HID "_HID"
#define METHOD_NAME__CID "_CID"
#define METHOD_NAME__UID "_UID"
@@ -64,11 +65,11 @@
/* Method names - these methods must appear at the namespace root */
-#define METHOD_NAME__BFS "\\_BFS"
-#define METHOD_NAME__GTS "\\_GTS"
-#define METHOD_NAME__PTS "\\_PTS"
-#define METHOD_NAME__SST "\\_SI._SST"
-#define METHOD_NAME__WAK "\\_WAK"
+#define METHOD_PATHNAME__BFS "\\_BFS"
+#define METHOD_PATHNAME__GTS "\\_GTS"
+#define METHOD_PATHNAME__PTS "\\_PTS"
+#define METHOD_PATHNAME__SST "\\_SI._SST"
+#define METHOD_PATHNAME__WAK "\\_WAK"
/* Definitions of the predefined namespace names */
@@ -79,6 +80,5 @@
#define ACPI_PREFIX_LOWER (u32) 0x69706361 /* "acpi" */
#define ACPI_NS_ROOT_PATH "\\"
-#define ACPI_NS_SYSTEM_BUS "_SB_"
#endif /* __ACNAMES_H__ */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 6cd5b6403a7b..f1c8ca60e824 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -323,6 +323,8 @@ int acpi_bus_set_power(acpi_handle handle, int state);
int acpi_bus_update_power(acpi_handle handle, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
bool acpi_bus_can_wakeup(acpi_handle handle);
+int acpi_power_resource_register_device(struct device *dev, acpi_handle handle);
+void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handle);
#ifdef CONFIG_ACPI_PROC_EVENT
int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data);
@@ -392,8 +394,13 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p)
#endif
#ifdef CONFIG_PM_SLEEP
+int acpi_pm_device_run_wake(struct device *, bool);
int acpi_pm_device_sleep_wake(struct device *, bool);
#else
+static inline int acpi_pm_device_run_wake(struct device *dev, bool enable)
+{
+ return -ENODEV;
+}
static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
{
return -ENODEV;
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 7c9aebe8a7aa..21a5548c6686 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -95,6 +95,11 @@ acpi_status
acpi_os_table_override(struct acpi_table_header *existing_table,
struct acpi_table_header **new_table);
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+ acpi_physical_address * new_address,
+ u32 *new_table_length);
+
/*
* Spinlock primitives
*/
@@ -217,14 +222,10 @@ acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width);
* Platform and hardware-independent physical memory interfaces
*/
acpi_status
-acpi_os_read_memory(acpi_physical_address address, u32 * value, u32 width);
-acpi_status
-acpi_os_read_memory64(acpi_physical_address address, u64 *value, u32 width);
+acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width);
acpi_status
-acpi_os_write_memory(acpi_physical_address address, u32 value, u32 width);
-acpi_status
-acpi_os_write_memory64(acpi_physical_address address, u64 value, u32 width);
+acpi_os_write_memory(acpi_physical_address address, u64 value, u32 width);
/*
* Platform and hardware-independent PCI configuration space access
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index a28da35ba45e..982110134672 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,8 +47,9 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20120111
+#define ACPI_CA_VERSION 0x20120320
+#include "acconfig.h"
#include "actypes.h"
#include "actbl.h"
@@ -71,6 +72,33 @@ extern u8 acpi_gbl_copy_dsdt_locally;
extern u8 acpi_gbl_truncate_io_addresses;
extern u8 acpi_gbl_disable_auto_repair;
+/*
+ * Hardware-reduced prototypes. All interfaces that use these macros will
+ * be configured out of the ACPICA build if the ACPI_REDUCED_HARDWARE flag
+ * is set to TRUE.
+ */
+#if (!ACPI_REDUCED_HARDWARE)
+#define ACPI_HW_DEPENDENT_RETURN_STATUS(prototype) \
+ prototype;
+
+#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
+ prototype;
+
+#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
+ prototype;
+
+#else
+#define ACPI_HW_DEPENDENT_RETURN_STATUS(prototype) \
+ static ACPI_INLINE prototype {return(AE_NOT_CONFIGURED);}
+
+#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
+ static ACPI_INLINE prototype {return(AE_OK);}
+
+#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
+ static ACPI_INLINE prototype {}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
+
extern u32 acpi_current_gpe_count;
extern struct acpi_table_fadt acpi_gbl_FADT;
extern u8 acpi_gbl_system_awake_and_running;
@@ -96,9 +124,8 @@ acpi_status acpi_terminate(void);
acpi_status acpi_subsystem_status(void);
#endif
-acpi_status acpi_enable(void);
-
-acpi_status acpi_disable(void);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable(void))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable(void))
#ifdef ACPI_FUTURE_USAGE
acpi_status acpi_get_system_info(struct acpi_buffer *ret_buffer);
@@ -235,17 +262,34 @@ acpi_status acpi_get_parent(acpi_handle object, acpi_handle * out_handle);
acpi_status
acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
-acpi_status
-acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler,
- void *context);
-
-acpi_status
-acpi_install_fixed_event_handler(u32 acpi_event,
- acpi_event_handler handler, void *context);
-
-acpi_status
-acpi_remove_fixed_event_handler(u32 acpi_event, acpi_event_handler handler);
-
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_install_global_event_handler
+ (ACPI_GBL_EVENT_HANDLER handler, void *context))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_install_fixed_event_handler(u32
+ acpi_event,
+ acpi_event_handler
+ handler,
+ void
+ *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_remove_fixed_event_handler(u32 acpi_event,
+ acpi_event_handler
+ handler))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_install_gpe_handler(acpi_handle
+ gpe_device,
+ u32 gpe_number,
+ u32 type,
+ acpi_gpe_handler
+ address,
+ void *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_remove_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ acpi_gpe_handler
+ address))
acpi_status
acpi_install_notify_handler(acpi_handle device,
u32 handler_type,
@@ -266,15 +310,6 @@ acpi_remove_address_space_handler(acpi_handle device,
acpi_adr_space_type space_id,
acpi_adr_space_handler handler);
-acpi_status
-acpi_install_gpe_handler(acpi_handle gpe_device,
- u32 gpe_number,
- u32 type, acpi_gpe_handler address, void *context);
-
-acpi_status
-acpi_remove_gpe_handler(acpi_handle gpe_device,
- u32 gpe_number, acpi_gpe_handler address);
-
#ifdef ACPI_FUTURE_USAGE
acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
#endif
@@ -284,9 +319,11 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler);
/*
* Global Lock interfaces
*/
-acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle);
-
-acpi_status acpi_release_global_lock(u32 handle);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_acquire_global_lock(u16 timeout,
+ u32 *handle))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_release_global_lock(u32 handle))
/*
* Interfaces to AML mutex objects
@@ -299,47 +336,75 @@ acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname);
/*
* Fixed Event interfaces
*/
-acpi_status acpi_enable_event(u32 event, u32 flags);
-
-acpi_status acpi_disable_event(u32 event, u32 flags);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_enable_event(u32 event, u32 flags))
-acpi_status acpi_clear_event(u32 event);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_disable_event(u32 event, u32 flags))
-acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_clear_event(u32 event))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_get_event_status(u32 event,
+ acpi_event_status
+ *event_status))
/*
* General Purpose Event (GPE) Interfaces
*/
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number);
-
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number);
-
-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number);
-
-acpi_status
-acpi_setup_gpe_for_wake(acpi_handle parent_device,
- acpi_handle gpe_device, u32 gpe_number);
-
-acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action);
-
-acpi_status
-acpi_get_gpe_status(acpi_handle gpe_device,
- u32 gpe_number, acpi_event_status *event_status);
-
-acpi_status acpi_disable_all_gpes(void);
-
-acpi_status acpi_enable_all_runtime_gpes(void);
-
-acpi_status acpi_get_gpe_device(u32 gpe_index, acpi_handle *gpe_device);
-
-acpi_status
-acpi_install_gpe_block(acpi_handle gpe_device,
- struct acpi_generic_address *gpe_block_address,
- u32 register_count, u32 interrupt_number);
-
-acpi_status acpi_remove_gpe_block(acpi_handle gpe_device);
-
-acpi_status acpi_update_all_gpes(void);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_update_all_gpes(void))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_enable_gpe(acpi_handle gpe_device,
+ u32 gpe_number))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_disable_gpe(acpi_handle gpe_device,
+ u32 gpe_number))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_clear_gpe(acpi_handle gpe_device,
+ u32 gpe_number))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_set_gpe(acpi_handle gpe_device,
+ u32 gpe_number, u8 action))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_finish_gpe(acpi_handle gpe_device,
+ u32 gpe_number))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_setup_gpe_for_wake(acpi_handle
+ parent_device,
+ acpi_handle gpe_device,
+ u32 gpe_number))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_set_gpe_wake_mask(acpi_handle gpe_device,
+ u32 gpe_number,
+ u8 action))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_get_gpe_status(acpi_handle gpe_device,
+ u32 gpe_number,
+ acpi_event_status
+ *event_status))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_get_gpe_device(u32 gpe_index,
+ acpi_handle * gpe_device))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_install_gpe_block(acpi_handle gpe_device,
+ struct
+ acpi_generic_address
+ *gpe_block_address,
+ u32 register_count,
+ u32 interrupt_number))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_remove_gpe_block(acpi_handle gpe_device))
/*
* Resource interfaces
@@ -391,34 +456,60 @@ acpi_buffer_to_resource(u8 *aml_buffer,
*/
acpi_status acpi_reset(void);
-acpi_status acpi_read_bit_register(u32 register_id, u32 *return_value);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_read_bit_register(u32 register_id,
+ u32 *return_value))
-acpi_status acpi_write_bit_register(u32 register_id, u32 value);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_write_bit_register(u32 register_id,
+ u32 value))
-acpi_status acpi_set_firmware_waking_vector(u32 physical_address);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_set_firmware_waking_vector(u32
+ physical_address))
#if ACPI_MACHINE_WIDTH == 64
-acpi_status acpi_set_firmware_waking_vector64(u64 physical_address);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_set_firmware_waking_vector64(u64
+ physical_address))
#endif
acpi_status acpi_read(u64 *value, struct acpi_generic_address *reg);
acpi_status acpi_write(u64 value, struct acpi_generic_address *reg);
+/*
+ * Sleep/Wake interfaces
+ */
acpi_status
acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state);
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state);
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags);
-acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void))
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags);
acpi_status acpi_leave_sleep_state(u8 sleep_state);
/*
+ * ACPI Timer interfaces
+ */
+#ifdef ACPI_FUTURE_USAGE
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_get_timer_resolution(u32 *resolution))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_get_timer(u32 *ticks))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_get_timer_duration(u32 start_ticks,
+ u32 end_ticks,
+ u32 *time_elapsed))
+#endif /* ACPI_FUTURE_USAGE */
+
+/*
* Error/Warning output
*/
void ACPI_INTERNAL_VAR_XFACE
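
The acpixf.h rework wraps every hardware-dependent interface in ACPI_HW_DEPENDENT_RETURN_* so that a reduced-hardware build turns the declaration into an inline stub returning AE_NOT_CONFIGURED instead of a prototype. A toy version of that macro trick (names and status values are local to the example, not ACPICA's):

#include <stdio.h>

typedef int model_status;
#define MODEL_NOT_CONFIGURED	12

#define MODEL_REDUCED_HARDWARE	1	/* flip to 0 for the full build */

#if (!MODEL_REDUCED_HARDWARE)
#define HW_DEPENDENT_RETURN_STATUS(prototype) \
	prototype;
#else
#define HW_DEPENDENT_RETURN_STATUS(prototype) \
	static inline prototype { return MODEL_NOT_CONFIGURED; }
#endif

/* One macro use covers both configurations. */
HW_DEPENDENT_RETURN_STATUS(model_status model_enable(void))

int main(void)
{
#if MODEL_REDUCED_HARDWARE
	printf("model_enable() -> %d (stubbed out)\n", model_enable());
#else
	printf("model_enable() is only a prototype in this configuration\n");
#endif
	return 0;
}

Callers never need their own #ifdefs: the same call site either links against the real implementation or gets the inline stub.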
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 8e1b92f6f650..8dea54665dcf 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -309,6 +309,13 @@ enum acpi_prefered_pm_profiles {
PM_TABLET = 8
};
+/* Values for sleep_status and sleep_control registers (V5 FADT) */
+
+#define ACPI_X_WAKE_STATUS 0x80
+#define ACPI_X_SLEEP_TYPE_MASK 0x1C
+#define ACPI_X_SLEEP_TYPE_POSITION 0x02
+#define ACPI_X_SLEEP_ENABLE 0x20
+
/* Reset to default packing */
#pragma pack()
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index d5dee7ce9474..eba66043cf1b 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -518,6 +518,13 @@ typedef u64 acpi_integer;
#define ACPI_SLEEP_TYPE_INVALID 0xFF
/*
+ * Sleep/Wake flags
+ */
+#define ACPI_NO_OPTIONAL_METHODS 0x00 /* Do not execute any optional methods */
+#define ACPI_EXECUTE_GTS 0x01 /* For enter sleep interface */
+#define ACPI_EXECUTE_BFS 0x02 /* For leave sleep prep interface */
+
+/*
* Standard notify values
*/
#define ACPI_NOTIFY_BUS_CHECK (u8) 0x00
@@ -532,8 +539,9 @@ typedef u64 acpi_integer;
#define ACPI_NOTIFY_DEVICE_PLD_CHECK (u8) 0x09
#define ACPI_NOTIFY_RESERVED (u8) 0x0A
#define ACPI_NOTIFY_LOCALITY_UPDATE (u8) 0x0B
+#define ACPI_NOTIFY_SHUTDOWN_REQUEST (u8) 0x0C
-#define ACPI_NOTIFY_MAX 0x0B
+#define ACPI_NOTIFY_MAX 0x0C
/*
* Types associated with ACPI names and objects. The first group of
@@ -698,7 +706,8 @@ typedef u32 acpi_event_status;
#define ACPI_ALL_NOTIFY (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY)
#define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3
-#define ACPI_MAX_SYS_NOTIFY 0x7f
+#define ACPI_MAX_SYS_NOTIFY 0x7F
+#define ACPI_MAX_DEVICE_SPECIFIC_NOTIFY 0xBF
/* Address Space (Operation Region) Types */
@@ -786,6 +795,15 @@ typedef u8 acpi_adr_space_type;
#define ACPI_ENABLE_EVENT 1
#define ACPI_DISABLE_EVENT 0
+/* Sleep function dispatch */
+
+typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state, u8 flags);
+
+struct acpi_sleep_functions {
+ ACPI_SLEEP_FUNCTION legacy_function;
+ ACPI_SLEEP_FUNCTION extended_function;
+};
+
/*
* External ACPI object definition
*/
diff --git a/include/asm-generic/posix_types.h b/include/asm-generic/posix_types.h
index 3dab00860e71..91d44bd4dde3 100644
--- a/include/asm-generic/posix_types.h
+++ b/include/asm-generic/posix_types.h
@@ -10,8 +10,13 @@
* architectures, so that you can override them.
*/
+#ifndef __kernel_long_t
+typedef long __kernel_long_t;
+typedef unsigned long __kernel_ulong_t;
+#endif
+
#ifndef __kernel_ino_t
-typedef unsigned long __kernel_ino_t;
+typedef __kernel_ulong_t __kernel_ino_t;
#endif
#ifndef __kernel_mode_t
@@ -19,7 +24,7 @@ typedef unsigned int __kernel_mode_t;
#endif
#ifndef __kernel_nlink_t
-typedef unsigned long __kernel_nlink_t;
+typedef __kernel_ulong_t __kernel_nlink_t;
#endif
#ifndef __kernel_pid_t
@@ -36,7 +41,7 @@ typedef unsigned int __kernel_gid_t;
#endif
#ifndef __kernel_suseconds_t
-typedef long __kernel_suseconds_t;
+typedef __kernel_long_t __kernel_suseconds_t;
#endif
#ifndef __kernel_daddr_t
@@ -44,8 +49,8 @@ typedef int __kernel_daddr_t;
#endif
#ifndef __kernel_uid32_t
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
#endif
#ifndef __kernel_old_uid_t
@@ -67,99 +72,29 @@ typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
#else
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
+typedef __kernel_ulong_t __kernel_size_t;
+typedef __kernel_long_t __kernel_ssize_t;
+typedef __kernel_long_t __kernel_ptrdiff_t;
#endif
#endif
+#ifndef __kernel_fsid_t
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+#endif
+
/*
* anything below here should be completely generic
*/
-typedef long __kernel_off_t;
+typedef __kernel_long_t __kernel_off_t;
typedef long long __kernel_loff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_clock_t;
+typedef __kernel_long_t __kernel_time_t;
+typedef __kernel_long_t __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#ifdef __KERNEL__
-
-#undef __FD_SET
-static inline void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
-}
-
-#undef __FD_CLR
-static inline void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
-}
-
-#undef __FD_ISSET
-static inline int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static inline void __FD_ZERO(__kernel_fd_set *__p)
-{
- unsigned long *__tmp = __p->fds_bits;
- int __i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 16:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- __tmp[ 8] = 0; __tmp[ 9] = 0;
- __tmp[10] = 0; __tmp[11] = 0;
- __tmp[12] = 0; __tmp[13] = 0;
- __tmp[14] = 0; __tmp[15] = 0;
- return;
-
- case 8:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- return;
-
- case 4:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- return;
- }
- }
- __i = __FDSET_LONGS;
- while (__i) {
- __i--;
- *__tmp = 0;
- __tmp++;
- }
-}
-
-#endif /* __KERNEL__ */
-
#endif /* __ASM_GENERIC_POSIX_TYPES_H */
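
The posix_types.h change introduces __kernel_long_t/__kernel_ulong_t as override points: an architecture (x32, for example) can define them first and every derived type picks up the new width, while the generic header only supplies the defaults. A standalone sketch of that ifndef-override pattern (the commented-out "architecture header" shows where an override would go):

#include <stdio.h>

/* "Architecture" header would go here, e.g.:
 * typedef long long __kernel_long_t;
 * typedef unsigned long long __kernel_ulong_t;
 * #define __kernel_long_t __kernel_long_t
 */

#ifndef __kernel_long_t
typedef long __kernel_long_t;
typedef unsigned long __kernel_ulong_t;
#endif

/* Derived types then follow the same width automatically. */
typedef __kernel_long_t  model_off_t;
typedef __kernel_ulong_t model_size_t;

int main(void)
{
	printf("off_t is %zu bytes, size_t is %zu bytes\n",
	       sizeof(model_off_t), sizeof(model_size_t));
	return 0;
}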
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 2292d1af9d70..991ef01cd77e 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -218,7 +218,7 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
/* fs/sendfile.c */
#define __NR3264_sendfile 71
-__SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile)
+__SYSCALL(__NR3264_sendfile, sys_sendfile64)
/* fs/select.c */
#define __NR_pselect6 72
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index a25555381097..a4b5da2b83f5 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -357,6 +357,7 @@ header-y += suspend_ioctls.h
header-y += swab.h
header-y += synclink.h
header-y += sysctl.h
+header-y += sysinfo.h
header-y += taskstats.h
header-y += tcp.h
header-y += telephony.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f53fea61f40a..f421dd84f29d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -372,4 +372,14 @@ static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
#endif /* !CONFIG_ACPI */
+#ifdef CONFIG_ACPI
+void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
+ u32 pm1a_ctrl, u32 pm1b_ctrl));
+
+acpi_status acpi_os_prepare_sleep(u8 sleep_state,
+ u32 pm1a_control, u32 pm1b_control);
+#else
+#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
+#endif
+
#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/aio_abi.h b/include/linux/aio_abi.h
index 2c8731664180..86fa7a71336a 100644
--- a/include/linux/aio_abi.h
+++ b/include/linux/aio_abi.h
@@ -30,7 +30,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
-typedef unsigned long aio_context_t;
+typedef __kernel_ulong_t aio_context_t;
enum {
IOCB_CMD_PREAD = 0,
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 033f6aa670de..e64ce2cfee99 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -47,9 +47,6 @@ enum {
* @muxval: a number usually used to poke into some mux register to
* mux in the signal to this channel
* @cctl_opt: default options for the channel control register
- * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
- * channels. Fill with 'true' if peripheral should be flow controller. Direction
- * will be selected at Runtime.
* @addr: source/target address in physical memory for this DMA channel,
* can be the address of a FIFO register for burst requests for example.
* This can be left undefined if the PrimeCell API is used for configuring
@@ -68,7 +65,6 @@ struct pl08x_channel_data {
int max_signal;
u32 muxval;
u32 cctl;
- bool device_fc;
dma_addr_t addr;
bool circular_buffer;
bool single;
@@ -176,13 +172,15 @@ enum pl08x_dma_chan_state {
* @runtime_addr: address for RX/TX according to the runtime config
* @runtime_direction: current direction of this channel according to
* runtime config
- * @lc: last completed transaction on this channel
* @pend_list: queued transactions pending on this channel
* @at: active transaction on this channel
* @lock: a lock for this channel data
* @host: a pointer to the host (internal use)
* @state: whether the channel is idle, paused, running etc
* @slave: whether this channel is a device (slave) or for memcpy
+ * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
+ * channels. Fill with 'true' if peripheral should be flow controller. Direction
+ * will be selected at Runtime.
* @waiting: a TX descriptor on this channel which is waiting for a physical
* channel to become available
*/
@@ -198,13 +196,13 @@ struct pl08x_dma_chan {
u32 src_cctl;
u32 dst_cctl;
enum dma_transfer_direction runtime_direction;
- dma_cookie_t lc;
struct list_head pend_list;
struct pl08x_txd *at;
spinlock_t lock;
struct pl08x_driver_data *host;
enum pl08x_dma_chan_state state;
bool slave;
+ bool device_fc;
struct pl08x_txd *waiting;
};
diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h
index 12e023c19ac1..fe93758e8403 100644
--- a/include/linux/amba/pl330.h
+++ b/include/linux/amba/pl330.h
@@ -13,7 +13,6 @@
#define __AMBA_PL330_H_
#include <linux/dmaengine.h>
-#include <asm/hardware/pl330.h>
struct dma_pl330_platdata {
/*
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 7e05fcee75a1..5d46217f84ad 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -19,6 +19,10 @@
#include <asm/siginfo.h>
#include <asm/signal.h>
+#ifndef COMPAT_USE_64BIT_TIME
+#define COMPAT_USE_64BIT_TIME 0
+#endif
+
#define compat_jiffies_to_clock_t(x) \
(((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
@@ -83,10 +87,26 @@ typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
+/*
+ * These functions operate strictly on struct compat_time*
+ */
extern int get_compat_timespec(struct timespec *,
const struct compat_timespec __user *);
extern int put_compat_timespec(const struct timespec *,
struct compat_timespec __user *);
+extern int get_compat_timeval(struct timeval *,
+ const struct compat_timeval __user *);
+extern int put_compat_timeval(const struct timeval *,
+ struct compat_timeval __user *);
+/*
+ * These functions operate on 32- or 64-bit specs depending on
+ * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments and the
+ * naming as compat_get/put_ rather than get/put_compat_.
+ */
+extern int compat_get_timespec(struct timespec *, const void __user *);
+extern int compat_put_timespec(const struct timespec *, void __user *);
+extern int compat_get_timeval(struct timeval *, const void __user *);
+extern int compat_put_timeval(const struct timeval *, void __user *);
struct compat_iovec {
compat_uptr_t iov_base;
@@ -224,6 +244,7 @@ struct compat_sysinfo;
struct compat_sysctl_args;
struct compat_kexec_segment;
struct compat_mq_attr;
+struct compat_msgbuf;
extern void compat_exit_robust_list(struct task_struct *curr);
@@ -234,13 +255,22 @@ asmlinkage long
compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
compat_size_t __user *len_ptr);
+#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
long compat_sys_semctl(int first, int second, int third, void __user *uptr);
long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
int version, void __user *uptr);
-long compat_sys_msgctl(int first, int second, void __user *uptr);
long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
void __user *uptr);
+#else
+long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
+long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
+ size_t msgsz, int msgflg);
+long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
+ size_t msgsz, long msgtyp, int msgflg);
+long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
+#endif
+long compat_sys_msgctl(int first, int second, void __user *uptr);
long compat_sys_shmctl(int first, int second, void __user *uptr);
long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned nsems, const struct compat_timespec __user *timeout);
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 712abcc205ae..6c26a3da0e03 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/completion.h>
+#include <linux/hrtimer.h>
#define CPUIDLE_STATE_MAX 8
#define CPUIDLE_NAME_LEN 16
@@ -43,12 +44,15 @@ struct cpuidle_state {
unsigned int flags;
unsigned int exit_latency; /* in US */
- unsigned int power_usage; /* in mW */
+ int power_usage; /* in mW */
unsigned int target_residency; /* in US */
+ unsigned int disable;
int (*enter) (struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index);
+
+ int (*enter_dead) (struct cpuidle_device *dev, int index);
};
/* Idle State Flags */
@@ -96,7 +100,6 @@ struct cpuidle_device {
struct list_head device_list;
struct kobject kobj;
struct completion kobj_unregister;
- void *governor_data;
};
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -118,10 +121,12 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
****************************/
struct cpuidle_driver {
- char name[CPUIDLE_NAME_LEN];
+ const char *name;
struct module *owner;
unsigned int power_specified:1;
+ /* set to 1 to use the core cpuidle time keeping (for all states). */
+ unsigned int en_core_tk_irqen:1;
struct cpuidle_state states[CPUIDLE_STATE_MAX];
int state_count;
int safe_state_index;
@@ -140,6 +145,11 @@ extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
+extern int cpuidle_wrap_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index,
+ int (*enter)(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index));
+extern int cpuidle_play_dead(void);
#else
static inline void disable_cpuidle(void) { }
@@ -157,6 +167,12 @@ static inline void cpuidle_resume_and_unlock(void) { }
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
+static inline int cpuidle_wrap_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index,
+ int (*enter)(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index))
+{ return -ENODEV; }
+static inline int cpuidle_play_dead(void) {return -ENODEV; }
#endif
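
A minimal sketch, not part of the patch, of how en_core_tk_irqen is expected to pair with cpuidle_wrap_enter(): the driver's raw enter routine does no time accounting and the wrapper supplies it. The driver name, the state table values and my_lowpower_enter() are hypothetical.

#include <linux/cpuidle.h>
#include <linux/module.h>

static int my_lowpower_enter(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	/* platform-specific low-power entry would go here */
	return index;
}

static int my_enter(struct cpuidle_device *dev,
		    struct cpuidle_driver *drv, int index)
{
	/* the core measures residency because en_core_tk_irqen is set */
	return cpuidle_wrap_enter(dev, drv, index, my_lowpower_enter);
}

static struct cpuidle_driver my_idle_driver = {
	.name			= "my_idle",
	.owner			= THIS_MODULE,
	.en_core_tk_irqen	= 1,
	.states = {
		[0] = {
			.name			= "WFI",
			.desc			= "wait for interrupt",
			.exit_latency		= 1,
			.target_residency	= 1,
			.enter			= my_enter,
		},
	},
	.state_count		= 1,
};
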
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 7a7e5fd2a277..668f66baac7b 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -22,7 +22,7 @@ extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -135,10 +135,8 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
cpumask_copy(mask, cpu_possible_mask);
}
-static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
- do_set_cpus_allowed(p, cpu_possible_mask);
- return cpumask_any(cpu_active_mask);
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index e13117cbd2f7..5a736af3cc7a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -77,7 +77,7 @@ static inline u64 dma_get_mask(struct device *dev)
return DMA_BIT_MASK(32);
}
-#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK
+#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index a5966f691ef8..676f967390ae 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -18,14 +18,15 @@
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
-#ifndef DMAENGINE_H
-#define DMAENGINE_H
+#ifndef LINUX_DMAENGINE_H
+#define LINUX_DMAENGINE_H
#include <linux/device.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
+#include <linux/types.h>
#include <asm/page.h>
/**
@@ -258,6 +259,7 @@ struct dma_chan_percpu {
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
* @cookie: last cookie value returned to client
+ * @completed_cookie: last completed cookie for this channel
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
* @device_node: used to add this to the device chan list
@@ -269,6 +271,7 @@ struct dma_chan_percpu {
struct dma_chan {
struct dma_device *device;
dma_cookie_t cookie;
+ dma_cookie_t completed_cookie;
/* sysfs */
int chan_id;
@@ -332,6 +335,9 @@ enum dma_slave_buswidth {
* may or may not be applicable on memory sources.
* @dst_maxburst: same as src_maxburst but for destination target
* mutatis mutandis.
+ * @device_fc: Flow controller settings. Only valid for slave channels. Set
+ * to 'true' if the peripheral should be the flow controller. Direction will
+ * be selected at runtime.
*
* This struct is passed in as configuration data to a DMA engine
* in order to set up a certain channel for DMA transport at runtime.
@@ -358,6 +364,7 @@ struct dma_slave_config {
enum dma_slave_buswidth dst_addr_width;
u32 src_maxburst;
u32 dst_maxburst;
+ bool device_fc;
};
static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -576,10 +583,11 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags);
+ unsigned long flags, void *context);
struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction);
+ size_t period_len, enum dma_transfer_direction direction,
+ void *context);
struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags);
@@ -613,7 +621,24 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
struct scatterlist sg;
sg_init_one(&sg, buf, len);
- return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags);
+ return chan->device->device_prep_slave_sg(chan, &sg, 1,
+ dir, flags, NULL);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+ dir, flags, NULL);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir)
+{
+ return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
+ period_len, dir, NULL);
}
static inline int dmaengine_terminate_all(struct dma_chan *chan)
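
A minimal sketch, not part of the patch: a slave driver submits a scatterlist through the new dmaengine_prep_slave_sg() wrapper and never sees the added 'context' argument of the underlying device_prep_slave_sg hook. The channel, scatterlist and callback are assumed to come from elsewhere in the driver.

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int my_submit_rx(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, dma_async_tx_callback cb,
			void *cb_param)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = cb;
	desc->callback_param = cb_param;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
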
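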
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index f2c64f92c4a0..2412e02d7c0f 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -31,18 +31,6 @@ struct dw_dma_platform_data {
unsigned char chan_priority;
};
-/**
- * enum dw_dma_slave_width - DMA slave register access width.
- * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
- * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
- * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
- */
-enum dw_dma_slave_width {
- DW_DMA_SLAVE_WIDTH_8BIT,
- DW_DMA_SLAVE_WIDTH_16BIT,
- DW_DMA_SLAVE_WIDTH_32BIT,
-};
-
/* bursts size */
enum dw_dma_msize {
DW_DMA_MSIZE_1,
@@ -55,47 +43,21 @@ enum dw_dma_msize {
DW_DMA_MSIZE_256,
};
-/* flow controller */
-enum dw_dma_fc {
- DW_DMA_FC_D_M2M,
- DW_DMA_FC_D_M2P,
- DW_DMA_FC_D_P2M,
- DW_DMA_FC_D_P2P,
- DW_DMA_FC_P_P2M,
- DW_DMA_FC_SP_P2P,
- DW_DMA_FC_P_M2P,
- DW_DMA_FC_DP_P2P,
-};
-
/**
* struct dw_dma_slave - Controller-specific information about a slave
*
* @dma_dev: required DMA master device
- * @tx_reg: physical address of data register used for
- * memory-to-peripheral transfers
- * @rx_reg: physical address of data register used for
- * peripheral-to-memory transfers
- * @reg_width: peripheral register width
* @cfg_hi: Platform-specific initializer for the CFG_HI register
* @cfg_lo: Platform-specific initializer for the CFG_LO register
* @src_master: src master for transfers on allocated channel.
* @dst_master: dest master for transfers on allocated channel.
- * @src_msize: src burst size.
- * @dst_msize: dest burst size.
- * @fc: flow controller for DMA transfer
*/
struct dw_dma_slave {
struct device *dma_dev;
- dma_addr_t tx_reg;
- dma_addr_t rx_reg;
- enum dw_dma_slave_width reg_width;
u32 cfg_hi;
u32 cfg_lo;
u8 src_master;
u8 dst_master;
- u8 src_msize;
- u8 dst_msize;
- u8 fc;
};
/* Platform-configurable bits in CFG_HI */
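
A minimal sketch, not part of the patch: with the dw_dmac-specific width/burst/flow-control fields removed above, a client is expected to describe the transfer through the generic dma_slave_config, including the new device_fc flag, via dmaengine_slave_config(). The FIFO address and burst size here are made up for the example.

#include <linux/dmaengine.h>

static int my_config_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
		/* DMA controller, not the peripheral, is the flow controller */
		.device_fc	= false,
	};

	return dmaengine_slave_config(chan, &cfg);
}
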
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 82163c4b32c9..158a41eed314 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -21,23 +21,45 @@
*/
#define NR_OPEN_DEFAULT BITS_PER_LONG
-/*
- * The embedded_fd_set is a small fd_set,
- * suitable for most tasks (which open <= BITS_PER_LONG files)
- */
-struct embedded_fd_set {
- unsigned long fds_bits[1];
-};
-
struct fdtable {
unsigned int max_fds;
struct file __rcu **fd; /* current fd array */
- fd_set *close_on_exec;
- fd_set *open_fds;
+ unsigned long *close_on_exec;
+ unsigned long *open_fds;
struct rcu_head rcu;
struct fdtable *next;
};
+static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
+{
+ __set_bit(fd, fdt->close_on_exec);
+}
+
+static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
+{
+ __clear_bit(fd, fdt->close_on_exec);
+}
+
+static inline bool close_on_exec(int fd, const struct fdtable *fdt)
+{
+ return test_bit(fd, fdt->close_on_exec);
+}
+
+static inline void __set_open_fd(int fd, struct fdtable *fdt)
+{
+ __set_bit(fd, fdt->open_fds);
+}
+
+static inline void __clear_open_fd(int fd, struct fdtable *fdt)
+{
+ __clear_bit(fd, fdt->open_fds);
+}
+
+static inline bool fd_is_open(int fd, const struct fdtable *fdt)
+{
+ return test_bit(fd, fdt->open_fds);
+}
+
/*
* Open file table structure
*/
@@ -53,8 +75,8 @@ struct files_struct {
*/
spinlock_t file_lock ____cacheline_aligned_in_smp;
int next_fd;
- struct embedded_fd_set close_on_exec_init;
- struct embedded_fd_set open_fds_init;
+ unsigned long close_on_exec_init[1];
+ unsigned long open_fds_init[1];
struct file __rcu * fd_array[NR_OPEN_DEFAULT];
};
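
A minimal sketch, not part of the patch: marking a descriptor close-on-exec with the new bitmap helpers instead of the old fd_set based FD_SET()/FD_CLR() calls, under the usual files->file_lock rule. my_set_cloexec() is hypothetical.

#include <linux/fdtable.h>
#include <linux/spinlock.h>

static void my_set_cloexec(struct files_struct *files, int fd)
{
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd_is_open(fd, fdt))
		__set_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}
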
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c437f914d537..135693e79f2b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -92,6 +92,10 @@ struct inodes_stat_t {
/* File is opened using open(.., 3, ..) and is writeable only for ioctls
(specialy hack for floppy.c) */
#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
+/* 32bit hashes as llseek() offset (for directories) */
+#define FMODE_32BITHASH ((__force fmode_t)0x200)
+/* 64bit hashes as llseek() offset (for directories) */
+#define FMODE_64BITHASH ((__force fmode_t)0x400)
/*
* Don't update ctime and mtime.
diff --git a/arch/arm/mach-mxs/include/mach/dma.h b/include/linux/fsl/mxs-dma.h
index 203d7c4a3e11..203d7c4a3e11 100644
--- a/arch/arm/mach-mxs/include/mach/dma.h
+++ b/include/linux/fsl/mxs-dma.h
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 004ff33ab38e..a7e977ff4abf 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -6,7 +6,7 @@ struct device;
struct gpio_keys_button {
/* Configuration parameters */
unsigned int code; /* input event code (KEY_*, SW_*) */
- int gpio;
+ int gpio; /* -1 if this key does not support gpio */
int active_low;
const char *desc;
unsigned int type; /* input event type (EV_KEY, EV_SW, EV_ABS) */
@@ -14,6 +14,7 @@ struct gpio_keys_button {
int debounce_interval; /* debounce ticks interval in msecs */
bool can_disable;
int value; /* axis value for EV_ABS */
+ unsigned int irq; /* Irq number in case of interrupt keys */
};
struct gpio_keys_platform_data {
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5db52d0ff1d4..a5375e7f3fea 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H
+#include <linux/sysinfo.h>
+
/*
* 'kernel.h' contains some often-used function prototypes etc
*/
@@ -698,27 +700,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
#endif
-struct sysinfo;
extern int do_sysinfo(struct sysinfo *info);
#endif /* __KERNEL__ */
-#define SI_LOAD_SHIFT 16
-struct sysinfo {
- long uptime; /* Seconds since boot */
- unsigned long loads[3]; /* 1, 5, and 15 minute load averages */
- unsigned long totalram; /* Total usable main memory size */
- unsigned long freeram; /* Available memory size */
- unsigned long sharedram; /* Amount of shared memory */
- unsigned long bufferram; /* Memory used by buffers */
- unsigned long totalswap; /* Total swap space size */
- unsigned long freeswap; /* swap space still available */
- unsigned short procs; /* Number of current processes */
- unsigned short pad; /* explicit padding for m68k */
- unsigned long totalhigh; /* Total high memory size */
- unsigned long freehigh; /* Available high memory size */
- unsigned int mem_unit; /* Memory unit size in bytes */
- char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */
-};
-
#endif
diff --git a/include/linux/lp8727.h b/include/linux/lp8727.h
index d21fa2865bf4..ea98c6133d32 100644
--- a/include/linux/lp8727.h
+++ b/include/linux/lp8727.h
@@ -1,4 +1,7 @@
/*
+ * LP8727 Micro/Mini USB IC with integrated charger
+ *
+ * Copyright (C) 2011 Texas Instruments
* Copyright (C) 2011 National Semiconductor
*
* This program is free software; you can redistribute it and/or modify
@@ -32,13 +35,24 @@ enum lp8727_ichg {
ICHG_1000mA,
};
+/**
+ * struct lp8727_chg_param
+ * @eoc_level : end of charge level setting
+ * @ichg : charging current
+ */
struct lp8727_chg_param {
- /* end of charge level setting */
enum lp8727_eoc_level eoc_level;
- /* charging current */
enum lp8727_ichg ichg;
};
+/**
+ * struct lp8727_platform_data
+ * @get_batt_present : check battery status - exists or not
+ * @get_batt_level : get battery voltage (mV)
+ * @get_batt_capacity : get battery capacity (%)
+ * @get_batt_temp : get battery temperature
+ * @ac, @usb : charging parameters for each charger type
+ */
struct lp8727_platform_data {
u8 (*get_batt_present)(void);
u16 (*get_batt_level)(void);
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 5fa697477b71..ee96cd51d8b2 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -146,6 +146,279 @@ struct abx500_init_settings {
u8 setting;
};
+/* Battery driver related data */
+/*
+ * ADC for the battery thermistor.
+ * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined
+ * with an NTC resistor to both identify the battery and to measure its
+ * temperature. Different phone manufacturers use different techniques to both
+ * identify the battery and to read its temperature.
+ */
+enum abx500_adc_therm {
+ ABx500_ADC_THERM_BATCTRL,
+ ABx500_ADC_THERM_BATTEMP,
+};
+
+/**
+ * struct abx500_res_to_temp - defines one point in a temp to res curve. To
+ * be used in battery packs that combine the identification resistor with an
+ * NTC resistor.
+ * @temp: battery pack temperature in Celsius
+ * @resist: NTC resistor net total resistance
+ */
+struct abx500_res_to_temp {
+ int temp;
+ int resist;
+};
+
+/**
+ * struct abx500_v_to_cap - Table for translating voltage to capacity
+ * @voltage: Voltage in mV
+ * @capacity: Capacity in percent
+ */
+struct abx500_v_to_cap {
+ int voltage;
+ int capacity;
+};
+
+/* Forward declaration */
+struct abx500_fg;
+
+/**
+ * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds
+ * if not specified
+ * @recovery_sleep_timer: Time between measurements while recovering
+ * @recovery_total_time: Total recovery time
+ * @init_timer: Measurement interval during startup
+ * @init_discard_time: Time we discard voltage measurement at startup
+ * @init_total_time: Total init time during startup
+ * @high_curr_time: Time current has to be high to go to recovery
+ * @accu_charging: FG accumulation time while charging
+ * @accu_high_curr: FG accumulation time in high current mode
+ * @high_curr_threshold: High current threshold, in mA
+ * @lowbat_threshold: Low battery threshold, in mV
+ * @overbat_threshold: Over battery threshold, in mV
+ * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0
+ * Resolution in 50 mV step.
+ * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1
+ * Resolution in 50 mV step.
+ * @user_cap_limit Capacity reported from user must be within this
+ * limit to be considered as sane, in percentage
+ * points.
+ * @maint_thres This is the threshold where we stop reporting
+ * battery full while in maintenance, in per cent
+ */
+struct abx500_fg_parameters {
+ int recovery_sleep_timer;
+ int recovery_total_time;
+ int init_timer;
+ int init_discard_time;
+ int init_total_time;
+ int high_curr_time;
+ int accu_charging;
+ int accu_high_curr;
+ int high_curr_threshold;
+ int lowbat_threshold;
+ int overbat_threshold;
+ int battok_falling_th_sel0;
+ int battok_raising_th_sel1;
+ int user_cap_limit;
+ int maint_thres;
+};
+
+/**
+ * struct abx500_charger_maximization - struct used by the board config.
+ * @use_maxi: Enable maximization for this battery type
+ * @maxi_chg_curr: Maximum charger current allowed
+ * @maxi_wait_cycles: cycles to wait before setting charger current
+ * @charger_curr_step delta between two charger current settings (mA)
+ */
+struct abx500_maxim_parameters {
+ bool ena_maxi;
+ int chg_curr;
+ int wait_cycles;
+ int charger_curr_step;
+};
+
+/**
+ * struct abx500_battery_type - different batteries supported
+ * @name: battery technology
+ * @resis_high: battery upper resistance limit
+ * @resis_low: battery lower resistance limit
+ * @charge_full_design: Maximum battery capacity in mAh
+ * @nominal_voltage: Nominal voltage of the battery in mV
+ * @termination_vol: max voltage up to which the battery can be charged
+ * @termination_curr: battery charging termination current in mA
+ * @recharge_vol: battery voltage limit that will trigger a new
+ * full charging cycle in the case where maintenance
+ * charging has been disabled
+ * @normal_cur_lvl: charger current in normal state in mA
+ * @normal_vol_lvl: charger voltage in normal state in mV
+ * @maint_a_cur_lvl: charger current in maintenance A state in mA
+ * @maint_a_vol_lvl: charger voltage in maintenance A state in mV
+ * @maint_a_chg_timer_h: charge time in maintenance A state
+ * @maint_b_cur_lvl: charger current in maintenance B state in mA
+ * @maint_b_vol_lvl: charger voltage in maintenance B state in mV
+ * @maint_b_chg_timer_h: charge time in maintenance B state
+ * @low_high_cur_lvl: charger current in temp low/high state in mA
+ * @low_high_vol_lvl: charger voltage in temp low/high state in mV
+ * @battery_resistance: battery inner resistance in mOhm.
+ * @n_temp_tbl_elements: number of elements in r_to_t_tbl
+ * @r_to_t_tbl: table containing resistance to temp points
+ * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl
+ * @v_to_cap_tbl: Voltage to capacity (in %) table
+ * @n_batres_tbl_elements number of elements in the batres_tbl
+ * @batres_tbl battery internal resistance vs temperature table
+ */
+struct abx500_battery_type {
+ int name;
+ int resis_high;
+ int resis_low;
+ int charge_full_design;
+ int nominal_voltage;
+ int termination_vol;
+ int termination_curr;
+ int recharge_vol;
+ int normal_cur_lvl;
+ int normal_vol_lvl;
+ int maint_a_cur_lvl;
+ int maint_a_vol_lvl;
+ int maint_a_chg_timer_h;
+ int maint_b_cur_lvl;
+ int maint_b_vol_lvl;
+ int maint_b_chg_timer_h;
+ int low_high_cur_lvl;
+ int low_high_vol_lvl;
+ int battery_resistance;
+ int n_temp_tbl_elements;
+ struct abx500_res_to_temp *r_to_t_tbl;
+ int n_v_cap_tbl_elements;
+ struct abx500_v_to_cap *v_to_cap_tbl;
+ int n_batres_tbl_elements;
+ struct batres_vs_temp *batres_tbl;
+};
+
+/**
+ * struct abx500_bm_capacity_levels - abx500 capacity level data
+ * @critical: critical capacity level in percent
+ * @low: low capacity level in percent
+ * @normal: normal capacity level in percent
+ * @high: high capacity level in percent
+ * @full: full capacity level in percent
+ */
+struct abx500_bm_capacity_levels {
+ int critical;
+ int low;
+ int normal;
+ int high;
+ int full;
+};
+
+/**
+ * struct abx500_bm_charger_parameters - Charger specific parameters
+ * @usb_volt_max: maximum allowed USB charger voltage in mV
+ * @usb_curr_max: maximum allowed USB charger current in mA
+ * @ac_volt_max: maximum allowed AC charger voltage in mV
+ * @ac_curr_max: maximum allowed AC charger current in mA
+ */
+struct abx500_bm_charger_parameters {
+ int usb_volt_max;
+ int usb_curr_max;
+ int ac_volt_max;
+ int ac_curr_max;
+};
+
+/**
+ * struct abx500_bm_data - abx500 battery management data
+ * @temp_under under this temp, charging is stopped
+ * @temp_low between this temp and temp_under charging is reduced
+ * @temp_high between this temp and temp_over charging is reduced
+ * @temp_over over this temp, charging is stopped
+ * @temp_now present battery temperature
+ * @temp_interval_chg temperature measurement interval in s when charging
+ * @temp_interval_nochg temperature measurement interval in s when not charging
+ * @main_safety_tmr_h safety timer for main charger
+ * @usb_safety_tmr_h safety timer for usb charger
+ * @bkup_bat_v voltage which we charge the backup battery with
+ * @bkup_bat_i current which we charge the backup battery with
+ * @no_maintenance indicates that maintenance charging is disabled
+ * @abx500_adc_therm placement of thermistor, batctrl or battemp adc
+ * @chg_unknown_bat flag to enable charging of unknown batteries
+ * @enable_overshoot flag to enable VBAT overshoot control
+ * @auto_trig flag to enable auto adc trigger
+ * @fg_res resistance of FG resistor in 0.1mOhm
+ * @n_btypes number of elements in array bat_type
+ * @batt_id index of the identified battery in array bat_type
+ * @interval_charging charge alg cycle period time when charging (sec)
+ * @interval_not_charging charge alg cycle period time when not charging (sec)
+ * @temp_hysteresis temperature hysteresis
+ * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
+ * @maxi: maximization parameters
+ * @cap_levels capacity in percent for the different capacity levels
+ * @bat_type table of supported battery types
+ * @chg_params charger parameters
+ * @fg_params fuel gauge parameters
+ */
+struct abx500_bm_data {
+ int temp_under;
+ int temp_low;
+ int temp_high;
+ int temp_over;
+ int temp_now;
+ int temp_interval_chg;
+ int temp_interval_nochg;
+ int main_safety_tmr_h;
+ int usb_safety_tmr_h;
+ int bkup_bat_v;
+ int bkup_bat_i;
+ bool no_maintenance;
+ bool chg_unknown_bat;
+ bool enable_overshoot;
+ bool auto_trig;
+ enum abx500_adc_therm adc_therm;
+ int fg_res;
+ int n_btypes;
+ int batt_id;
+ int interval_charging;
+ int interval_not_charging;
+ int temp_hysteresis;
+ int gnd_lift_resistance;
+ const struct abx500_maxim_parameters *maxi;
+ const struct abx500_bm_capacity_levels *cap_levels;
+ const struct abx500_battery_type *bat_type;
+ const struct abx500_bm_charger_parameters *chg_params;
+ const struct abx500_fg_parameters *fg_params;
+};
+
+struct abx500_chargalg_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct abx500_charger_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+ bool autopower_cfg;
+};
+
+struct abx500_btemp_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct abx500_fg_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct abx500_bm_plat_data {
+ struct abx500_bm_data *battery;
+ struct abx500_charger_platform_data *charger;
+ struct abx500_btemp_platform_data *btemp;
+ struct abx500_fg_platform_data *fg;
+ struct abx500_chargalg_platform_data *chargalg;
+};
+
int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
u8 value);
int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
new file mode 100644
index 000000000000..44310c98ee6e
--- /dev/null
+++ b/include/linux/mfd/abx500/ab8500-bm.h
@@ -0,0 +1,474 @@
+/*
+ * Copyright ST-Ericsson 2012.
+ *
+ * Author: Arun Murthy <arun.murthy@stericsson.com>
+ * Licensed under GPLv2.
+ */
+
+#ifndef _AB8500_BM_H
+#define _AB8500_BM_H
+
+#include <linux/kernel.h>
+#include <linux/mfd/abx500.h>
+
+/*
+ * System control 2 register offsets.
+ * bank = 0x02
+ */
+#define AB8500_MAIN_WDOG_CTRL_REG 0x01
+#define AB8500_LOW_BAT_REG 0x03
+#define AB8500_BATT_OK_REG 0x04
+/*
+ * USB/ULPI register offsets
+ * Bank : 0x5
+ */
+#define AB8500_USB_LINE_STAT_REG 0x80
+
+/*
+ * Charger / status register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_CH_STATUS1_REG 0x00
+#define AB8500_CH_STATUS2_REG 0x01
+#define AB8500_CH_USBCH_STAT1_REG 0x02
+#define AB8500_CH_USBCH_STAT2_REG 0x03
+#define AB8500_CH_FSM_STAT_REG 0x04
+#define AB8500_CH_STAT_REG 0x05
+
+/*
+ * Charger / control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_CH_VOLT_LVL_REG 0x40
+#define AB8500_CH_VOLT_LVL_MAX_REG 0x41 /*Only in Cut2.0*/
+#define AB8500_CH_OPT_CRNTLVL_REG 0x42
+#define AB8500_CH_OPT_CRNTLVL_MAX_REG 0x43 /*Only in Cut2.0*/
+#define AB8500_CH_WD_TIMER_REG 0x50
+#define AB8500_CHARG_WD_CTRL 0x51
+#define AB8500_BTEMP_HIGH_TH 0x52
+#define AB8500_LED_INDICATOR_PWM_CTRL 0x53
+#define AB8500_LED_INDICATOR_PWM_DUTY 0x54
+#define AB8500_BATT_OVV 0x55
+#define AB8500_CHARGER_CTRL 0x56
+#define AB8500_BAT_CTRL_CURRENT_SOURCE 0x60 /*Only in Cut2.0*/
+
+/*
+ * Charger / main control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_MCH_CTRL1 0x80
+#define AB8500_MCH_CTRL2 0x81
+#define AB8500_MCH_IPT_CURLVL_REG 0x82
+#define AB8500_CH_WD_REG 0x83
+
+/*
+ * Charger / USB control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_USBCH_CTRL1_REG 0xC0
+#define AB8500_USBCH_CTRL2_REG 0xC1
+#define AB8500_USBCH_IPT_CRNTLVL_REG 0xC2
+
+/*
+ * Gas Gauge register offsets
+ * Bank : 0x0C
+ */
+#define AB8500_GASG_CC_CTRL_REG 0x00
+#define AB8500_GASG_CC_ACCU1_REG 0x01
+#define AB8500_GASG_CC_ACCU2_REG 0x02
+#define AB8500_GASG_CC_ACCU3_REG 0x03
+#define AB8500_GASG_CC_ACCU4_REG 0x04
+#define AB8500_GASG_CC_SMPL_CNTRL_REG 0x05
+#define AB8500_GASG_CC_SMPL_CNTRH_REG 0x06
+#define AB8500_GASG_CC_SMPL_CNVL_REG 0x07
+#define AB8500_GASG_CC_SMPL_CNVH_REG 0x08
+#define AB8500_GASG_CC_CNTR_AVGOFF_REG 0x09
+#define AB8500_GASG_CC_OFFSET_REG 0x0A
+#define AB8500_GASG_CC_NCOV_ACCU 0x10
+#define AB8500_GASG_CC_NCOV_ACCU_CTRL 0x11
+#define AB8500_GASG_CC_NCOV_ACCU_LOW 0x12
+#define AB8500_GASG_CC_NCOV_ACCU_MED 0x13
+#define AB8500_GASG_CC_NCOV_ACCU_HIGH 0x14
+
+/*
+ * Interrupt register offsets
+ * Bank : 0x0E
+ */
+#define AB8500_IT_SOURCE2_REG 0x01
+#define AB8500_IT_SOURCE21_REG 0x14
+
+/*
+ * RTC register offsets
+ * Bank: 0x0F
+ */
+#define AB8500_RTC_BACKUP_CHG_REG 0x0C
+#define AB8500_RTC_CC_CONF_REG 0x01
+#define AB8500_RTC_CTRL_REG 0x0B
+
+/*
+ * OTP register offsets
+ * Bank : 0x15
+ */
+#define AB8500_OTP_CONF_15 0x0E
+
+/* GPADC constants from AB8500 spec, UM0836 */
+#define ADC_RESOLUTION 1024
+#define ADC_CH_MAIN_MIN 0
+#define ADC_CH_MAIN_MAX 20030
+#define ADC_CH_VBUS_MIN 0
+#define ADC_CH_VBUS_MAX 20030
+#define ADC_CH_VBAT_MIN 2300
+#define ADC_CH_VBAT_MAX 4800
+#define ADC_CH_BKBAT_MIN 0
+#define ADC_CH_BKBAT_MAX 3200
+
+/* Main charge i/p current */
+#define MAIN_CH_IP_CUR_0P9A 0x80
+#define MAIN_CH_IP_CUR_1P0A 0x90
+#define MAIN_CH_IP_CUR_1P1A 0xA0
+#define MAIN_CH_IP_CUR_1P2A 0xB0
+#define MAIN_CH_IP_CUR_1P3A 0xC0
+#define MAIN_CH_IP_CUR_1P4A 0xD0
+#define MAIN_CH_IP_CUR_1P5A 0xE0
+
+/* ChVoltLevel */
+#define CH_VOL_LVL_3P5 0x00
+#define CH_VOL_LVL_4P0 0x14
+#define CH_VOL_LVL_4P05 0x16
+#define CH_VOL_LVL_4P1 0x1B
+#define CH_VOL_LVL_4P15 0x20
+#define CH_VOL_LVL_4P2 0x25
+#define CH_VOL_LVL_4P6 0x4D
+
+/* ChOutputCurrentLevel */
+#define CH_OP_CUR_LVL_0P1 0x00
+#define CH_OP_CUR_LVL_0P2 0x01
+#define CH_OP_CUR_LVL_0P3 0x02
+#define CH_OP_CUR_LVL_0P4 0x03
+#define CH_OP_CUR_LVL_0P5 0x04
+#define CH_OP_CUR_LVL_0P6 0x05
+#define CH_OP_CUR_LVL_0P7 0x06
+#define CH_OP_CUR_LVL_0P8 0x07
+#define CH_OP_CUR_LVL_0P9 0x08
+#define CH_OP_CUR_LVL_1P4 0x0D
+#define CH_OP_CUR_LVL_1P5 0x0E
+#define CH_OP_CUR_LVL_1P6 0x0F
+
+/* BTEMP High thermal limits */
+#define BTEMP_HIGH_TH_57_0 0x00
+#define BTEMP_HIGH_TH_52 0x01
+#define BTEMP_HIGH_TH_57_1 0x02
+#define BTEMP_HIGH_TH_62 0x03
+
+/* current is mA */
+#define USB_0P1A 100
+#define USB_0P2A 200
+#define USB_0P3A 300
+#define USB_0P4A 400
+#define USB_0P5A 500
+
+#define LOW_BAT_3P1V 0x20
+#define LOW_BAT_2P3V 0x00
+#define LOW_BAT_RESET 0x01
+#define LOW_BAT_ENABLE 0x01
+
+/* Backup battery constants */
+#define BUP_ICH_SEL_50UA 0x00
+#define BUP_ICH_SEL_150UA 0x04
+#define BUP_ICH_SEL_300UA 0x08
+#define BUP_ICH_SEL_700UA 0x0C
+
+#define BUP_VCH_SEL_2P5V 0x00
+#define BUP_VCH_SEL_2P6V 0x01
+#define BUP_VCH_SEL_2P8V 0x02
+#define BUP_VCH_SEL_3P1V 0x03
+
+/* Battery OVV constants */
+#define BATT_OVV_ENA 0x02
+#define BATT_OVV_TH_3P7 0x00
+#define BATT_OVV_TH_4P75 0x01
+
+/* A value to indicate over voltage */
+#define BATT_OVV_VALUE 4750
+
+/* VBUS OVV constants */
+#define VBUS_OVV_SELECT_MASK 0x78
+#define VBUS_OVV_SELECT_5P6V 0x00
+#define VBUS_OVV_SELECT_5P7V 0x08
+#define VBUS_OVV_SELECT_5P8V 0x10
+#define VBUS_OVV_SELECT_5P9V 0x18
+#define VBUS_OVV_SELECT_6P0V 0x20
+#define VBUS_OVV_SELECT_6P1V 0x28
+#define VBUS_OVV_SELECT_6P2V 0x30
+#define VBUS_OVV_SELECT_6P3V 0x38
+
+#define VBUS_AUTO_IN_CURR_LIM_ENA 0x04
+
+/* Fuel Gauge constants */
+#define RESET_ACCU 0x02
+#define READ_REQ 0x01
+#define CC_DEEP_SLEEP_ENA 0x02
+#define CC_PWR_UP_ENA 0x01
+#define CC_SAMPLES_40 0x28
+#define RD_NCONV_ACCU_REQ 0x01
+#define CC_CALIB 0x08
+#define CC_INTAVGOFFSET_ENA 0x10
+#define CC_MUXOFFSET 0x80
+#define CC_INT_CAL_N_AVG_MASK 0x60
+#define CC_INT_CAL_SAMPLES_16 0x40
+#define CC_INT_CAL_SAMPLES_8 0x20
+#define CC_INT_CAL_SAMPLES_4 0x00
+
+/* RTC constants */
+#define RTC_BUP_CH_ENA 0x10
+
+/* BatCtrl Current Source Constants */
+#define BAT_CTRL_7U_ENA 0x01
+#define BAT_CTRL_20U_ENA 0x02
+#define BAT_CTRL_CMP_ENA 0x04
+#define FORCE_BAT_CTRL_CMP_HIGH 0x08
+#define BAT_CTRL_PULL_UP_ENA 0x10
+
+/* Battery type */
+#define BATTERY_UNKNOWN 00
+
+/**
+ * struct res_to_temp - defines one point in a temp to res curve. To
+ * be used in battery packs that combine the identification resistor with an
+ * NTC resistor.
+ * @temp: battery pack temperature in Celsius
+ * @resist: NTC resistor net total resistance
+ */
+struct res_to_temp {
+ int temp;
+ int resist;
+};
+
+/**
+ * struct batres_vs_temp - defines one point in a temp vs battery internal
+ * resistance curve.
+ * @temp: battery pack temperature in Celsius
+ * @resist: battery internal resistance in mOhm
+ */
+struct batres_vs_temp {
+ int temp;
+ int resist;
+};
+
+/* Forward declaration */
+struct ab8500_fg;
+
+/**
+ * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
+ * if not specified
+ * @recovery_sleep_timer: Time between measurements while recovering
+ * @recovery_total_time: Total recovery time
+ * @init_timer: Measurement interval during startup
+ * @init_discard_time: Time we discard voltage measurement at startup
+ * @init_total_time: Total init time during startup
+ * @high_curr_time: Time current has to be high to go to recovery
+ * @accu_charging: FG accumulation time while charging
+ * @accu_high_curr: FG accumulation time in high current mode
+ * @high_curr_threshold: High current threshold, in mA
+ * @lowbat_threshold: Low battery threshold, in mV
+ * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0
+ * Resolution in 50 mV step.
+ * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1
+ * Resolution in 50 mV step.
+ * @user_cap_limit Capacity reported from user must be within this
+ * limit to be considered as sane, in percentage
+ * points.
+ * @maint_thres This is the threshold where we stop reporting
+ * battery full while in maintenance, in per cent
+ */
+struct ab8500_fg_parameters {
+ int recovery_sleep_timer;
+ int recovery_total_time;
+ int init_timer;
+ int init_discard_time;
+ int init_total_time;
+ int high_curr_time;
+ int accu_charging;
+ int accu_high_curr;
+ int high_curr_threshold;
+ int lowbat_threshold;
+ int battok_falling_th_sel0;
+ int battok_raising_th_sel1;
+ int user_cap_limit;
+ int maint_thres;
+};
+
+/**
+ * struct ab8500_maxim_parameters - struct used by the board config.
+ * @ena_maxi: Enable maximization for this battery type
+ * @chg_curr: Maximum charger current allowed
+ * @wait_cycles: cycles to wait before setting charger current
+ * @charger_curr_step: delta between two charger current settings (mA)
+ */
+struct ab8500_maxim_parameters {
+ bool ena_maxi;
+ int chg_curr;
+ int wait_cycles;
+ int charger_curr_step;
+};
+
+/**
+ * struct ab8500_bm_capacity_levels - ab8500 capacity level data
+ * @critical: critical capacity level in percent
+ * @low: low capacity level in percent
+ * @normal: normal capacity level in percent
+ * @high: high capacity level in percent
+ * @full: full capacity level in percent
+ */
+struct ab8500_bm_capacity_levels {
+ int critical;
+ int low;
+ int normal;
+ int high;
+ int full;
+};
+
+/**
+ * struct ab8500_bm_charger_parameters - Charger specific parameters
+ * @usb_volt_max: maximum allowed USB charger voltage in mV
+ * @usb_curr_max: maximum allowed USB charger current in mA
+ * @ac_volt_max: maximum allowed AC charger voltage in mV
+ * @ac_curr_max: maximum allowed AC charger current in mA
+ */
+struct ab8500_bm_charger_parameters {
+ int usb_volt_max;
+ int usb_curr_max;
+ int ac_volt_max;
+ int ac_curr_max;
+};
+
+/**
+ * struct ab8500_bm_data - ab8500 battery management data
+ * @temp_under under this temp, charging is stopped
+ * @temp_low between this temp and temp_under charging is reduced
+ * @temp_high between this temp and temp_over charging is reduced
+ * @temp_over over this temp, charging is stopped
+ * @temp_interval_chg temperature measurement interval in s when charging
+ * @temp_interval_nochg temperature measurement interval in s when not charging
+ * @main_safety_tmr_h safety timer for main charger
+ * @usb_safety_tmr_h safety timer for usb charger
+ * @bkup_bat_v voltage which we charge the backup battery with
+ * @bkup_bat_i current which we charge the backup battery with
+ * @no_maintenance indicates that maintenance charging is disabled
+ * @adc_therm placement of thermistor, batctrl or battemp adc
+ * @chg_unknown_bat flag to enable charging of unknown batteries
+ * @enable_overshoot flag to enable VBAT overshoot control
+ * @fg_res resistance of FG resistor in 0.1mOhm
+ * @n_btypes number of elements in array bat_type
+ * @batt_id index of the identified battery in array bat_type
+ * @interval_charging charge alg cycle period time when charging (sec)
+ * @interval_not_charging charge alg cycle period time when not charging (sec)
+ * @temp_hysteresis temperature hysteresis
+ * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
+ * @maxi: maximization parameters
+ * @cap_levels capacity in percent for the different capacity levels
+ * @bat_type table of supported battery types
+ * @chg_params charger parameters
+ * @fg_params fuel gauge parameters
+ */
+struct ab8500_bm_data {
+ int temp_under;
+ int temp_low;
+ int temp_high;
+ int temp_over;
+ int temp_interval_chg;
+ int temp_interval_nochg;
+ int main_safety_tmr_h;
+ int usb_safety_tmr_h;
+ int bkup_bat_v;
+ int bkup_bat_i;
+ bool no_maintenance;
+ bool chg_unknown_bat;
+ bool enable_overshoot;
+ enum abx500_adc_therm adc_therm;
+ int fg_res;
+ int n_btypes;
+ int batt_id;
+ int interval_charging;
+ int interval_not_charging;
+ int temp_hysteresis;
+ int gnd_lift_resistance;
+ const struct ab8500_maxim_parameters *maxi;
+ const struct ab8500_bm_capacity_levels *cap_levels;
+ const struct ab8500_bm_charger_parameters *chg_params;
+ const struct ab8500_fg_parameters *fg_params;
+};
+
+struct ab8500_charger_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+ bool autopower_cfg;
+};
+
+struct ab8500_btemp_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct ab8500_fg_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct ab8500_chargalg_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+struct ab8500_btemp;
+struct ab8500_gpadc;
+struct ab8500_fg;
+#ifdef CONFIG_AB8500_BM
+void ab8500_fg_reinit(void);
+void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
+struct ab8500_btemp *ab8500_btemp_get(void);
+int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
+struct ab8500_fg *ab8500_fg_get(void);
+int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
+int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
+int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);
+int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
+
+#else
+static inline int ab8500_fg_inst_curr_done(struct ab8500_fg *di)
+{
+ return -ENODEV;
+}
+static inline void ab8500_fg_reinit(void)
+{
+}
+static inline void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA)
+{
+}
+static inline struct ab8500_btemp *ab8500_btemp_get(void)
+{
+ return NULL;
+}
+static inline int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
+{
+ return 0;
+}
+static inline struct ab8500_fg *ab8500_fg_get(void)
+{
+ return NULL;
+}
+static inline int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev)
+{
+ return -ENODEV;
+}
+
+static inline int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
+{
+ return -ENODEV;
+}
+
+static inline int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
+{
+ return -ENODEV;
+}
+
+#endif
+#endif /* _AB8500_BM_H */
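
A minimal sketch, not part of the patch: a consumer reading the battery-control temperature through the new accessors. With CONFIG_AB8500_BM disabled the stubs above make this degrade gracefully. my_read_batctrl_temp() is hypothetical.

#include <linux/errno.h>
#include <linux/mfd/abx500/ab8500-bm.h>

static int my_read_batctrl_temp(int *temp)
{
	struct ab8500_btemp *btemp = ab8500_btemp_get();

	if (!btemp)
		return -ENODEV;

	*temp = ab8500_btemp_get_batctrl_temp(btemp);
	return 0;
}
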
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
new file mode 100644
index 000000000000..9b07725750c9
--- /dev/null
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _UX500_CHARGALG_H
+#define _UX500_CHARGALG_H
+
+#include <linux/power_supply.h>
+
+#define psy_to_ux500_charger(x) container_of((x), \
+ struct ux500_charger, psy)
+
+/* Forward declaration */
+struct ux500_charger;
+
+struct ux500_charger_ops {
+ int (*enable) (struct ux500_charger *, int, int, int);
+ int (*kick_wd) (struct ux500_charger *);
+ int (*update_curr) (struct ux500_charger *, int);
+};
+
+/**
+ * struct ux500_charger - power supply ux500 charger sub class
+ * @psy power supply base class
+ * @ops ux500 charger operations
+ * @max_out_volt maximum output charger voltage in mV
+ * @max_out_curr maximum output charger current in mA
+ */
+struct ux500_charger {
+ struct power_supply psy;
+ struct ux500_charger_ops ops;
+ int max_out_volt;
+ int max_out_curr;
+};
+
+#endif
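
A minimal sketch, not part of the patch: a charger driver embeds struct ux500_charger and recovers it from the power_supply pointer in a property callback via psy_to_ux500_charger(). The driver structure and callback are hypothetical.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mfd/abx500/ux500_chargalg.h>

struct my_charger {
	struct ux500_charger ac_chg;
	int online;
};

static int my_ac_get_property(struct power_supply *psy,
			      enum power_supply_property psp,
			      union power_supply_propval *val)
{
	struct ux500_charger *uc = psy_to_ux500_charger(psy);
	struct my_charger *di = container_of(uc, struct my_charger, ac_chg);

	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = di->online;
		return 0;
	default:
		return -EINVAL;
	}
}
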
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index c4eec228eef9..650ef352f045 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -112,6 +112,11 @@ struct nand_bbt_descr {
#define NAND_BBT_USE_FLASH 0x00020000
/* Do not store flash based bad block table in OOB area; store it in-band */
#define NAND_BBT_NO_OOB 0x00040000
+/*
+ * Do not write new bad block markers to OOB; useful, e.g., when ECC covers
+ * entire spare area. Must be used with NAND_BBT_USE_FLASH.
+ */
+#define NAND_BBT_NO_OOB_BBM 0x00080000
/*
* Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 1bbd9f289245..ed270bd2e4df 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -47,6 +47,7 @@ struct mtd_blktrans_dev {
struct request_queue *rq;
spinlock_t queue_lock;
void *priv;
+ fmode_t file_mode;
};
struct mtd_blktrans_ops {
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
index 6987995ad3cf..b20029221fb1 100644
--- a/include/linux/mtd/fsmc.h
+++ b/include/linux/mtd/fsmc.h
@@ -26,95 +26,83 @@
#define FSMC_NAND_BW8 1
#define FSMC_NAND_BW16 2
-/*
- * The placement of the Command Latch Enable (CLE) and
- * Address Latch Enable (ALE) is twisted around in the
- * SPEAR310 implementation.
- */
-#if defined(CONFIG_MACH_SPEAR310)
-#define PLAT_NAND_CLE (1 << 17)
-#define PLAT_NAND_ALE (1 << 16)
-#else
-#define PLAT_NAND_CLE (1 << 16)
-#define PLAT_NAND_ALE (1 << 17)
-#endif
-
#define FSMC_MAX_NOR_BANKS 4
#define FSMC_MAX_NAND_BANKS 4
#define FSMC_FLASH_WIDTH8 1
#define FSMC_FLASH_WIDTH16 2
-struct fsmc_nor_bank_regs {
- uint32_t ctrl;
- uint32_t ctrl_tim;
-};
-
-/* ctrl register definitions */
-#define BANK_ENABLE (1 << 0)
-#define MUXED (1 << 1)
-#define NOR_DEV (2 << 2)
-#define WIDTH_8 (0 << 4)
-#define WIDTH_16 (1 << 4)
-#define RSTPWRDWN (1 << 6)
-#define WPROT (1 << 7)
-#define WRT_ENABLE (1 << 12)
-#define WAIT_ENB (1 << 13)
-
-/* ctrl_tim register definitions */
-
-struct fsmc_nand_bank_regs {
- uint32_t pc;
- uint32_t sts;
- uint32_t comm;
- uint32_t attrib;
- uint32_t ioata;
- uint32_t ecc1;
- uint32_t ecc2;
- uint32_t ecc3;
-};
-
+/* fsmc controller registers for NOR flash */
+#define CTRL 0x0
+ /* ctrl register definitions */
+ #define BANK_ENABLE (1 << 0)
+ #define MUXED (1 << 1)
+ #define NOR_DEV (2 << 2)
+ #define WIDTH_8 (0 << 4)
+ #define WIDTH_16 (1 << 4)
+ #define RSTPWRDWN (1 << 6)
+ #define WPROT (1 << 7)
+ #define WRT_ENABLE (1 << 12)
+ #define WAIT_ENB (1 << 13)
+
+#define CTRL_TIM 0x4
+ /* ctrl_tim register definitions */
+
+#define FSMC_NOR_BANK_SZ 0x8
#define FSMC_NOR_REG_SIZE 0x40
-struct fsmc_regs {
- struct fsmc_nor_bank_regs nor_bank_regs[FSMC_MAX_NOR_BANKS];
- uint8_t reserved_1[0x40 - 0x20];
- struct fsmc_nand_bank_regs bank_regs[FSMC_MAX_NAND_BANKS];
- uint8_t reserved_2[0xfe0 - 0xc0];
- uint32_t peripid0; /* 0xfe0 */
- uint32_t peripid1; /* 0xfe4 */
- uint32_t peripid2; /* 0xfe8 */
- uint32_t peripid3; /* 0xfec */
- uint32_t pcellid0; /* 0xff0 */
- uint32_t pcellid1; /* 0xff4 */
- uint32_t pcellid2; /* 0xff8 */
- uint32_t pcellid3; /* 0xffc */
-};
+#define FSMC_NOR_REG(base, bank, reg) (base + \
+ FSMC_NOR_BANK_SZ * (bank) + \
+ reg)
+
+/* fsmc controller registers for NAND flash */
+#define PC 0x00
+ /* pc register definitions */
+ #define FSMC_RESET (1 << 0)
+ #define FSMC_WAITON (1 << 1)
+ #define FSMC_ENABLE (1 << 2)
+ #define FSMC_DEVTYPE_NAND (1 << 3)
+ #define FSMC_DEVWID_8 (0 << 4)
+ #define FSMC_DEVWID_16 (1 << 4)
+ #define FSMC_ECCEN (1 << 6)
+ #define FSMC_ECCPLEN_512 (0 << 7)
+ #define FSMC_ECCPLEN_256 (1 << 7)
+ #define FSMC_TCLR_1 (1)
+ #define FSMC_TCLR_SHIFT (9)
+ #define FSMC_TCLR_MASK (0xF)
+ #define FSMC_TAR_1 (1)
+ #define FSMC_TAR_SHIFT (13)
+ #define FSMC_TAR_MASK (0xF)
+#define STS 0x04
+ /* sts register definitions */
+ #define FSMC_CODE_RDY (1 << 15)
+#define COMM 0x08
+ /* comm register definitions */
+ #define FSMC_TSET_0 0
+ #define FSMC_TSET_SHIFT 0
+ #define FSMC_TSET_MASK 0xFF
+ #define FSMC_TWAIT_6 6
+ #define FSMC_TWAIT_SHIFT 8
+ #define FSMC_TWAIT_MASK 0xFF
+ #define FSMC_THOLD_4 4
+ #define FSMC_THOLD_SHIFT 16
+ #define FSMC_THOLD_MASK 0xFF
+ #define FSMC_THIZ_1 1
+ #define FSMC_THIZ_SHIFT 24
+ #define FSMC_THIZ_MASK 0xFF
+#define ATTRIB 0x0C
+#define IOATA 0x10
+#define ECC1 0x14
+#define ECC2 0x18
+#define ECC3 0x1C
+#define FSMC_NAND_BANK_SZ 0x20
+
+#define FSMC_NAND_REG(base, bank, reg) (base + FSMC_NOR_REG_SIZE + \
+ (FSMC_NAND_BANK_SZ * (bank)) + \
+ reg)
#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
-/* pc register definitions */
-#define FSMC_RESET (1 << 0)
-#define FSMC_WAITON (1 << 1)
-#define FSMC_ENABLE (1 << 2)
-#define FSMC_DEVTYPE_NAND (1 << 3)
-#define FSMC_DEVWID_8 (0 << 4)
-#define FSMC_DEVWID_16 (1 << 4)
-#define FSMC_ECCEN (1 << 6)
-#define FSMC_ECCPLEN_512 (0 << 7)
-#define FSMC_ECCPLEN_256 (1 << 7)
-#define FSMC_TCLR_1 (1 << 9)
-#define FSMC_TAR_1 (1 << 13)
-
-/* sts register definitions */
-#define FSMC_CODE_RDY (1 << 15)
-
-/* comm register definitions */
-#define FSMC_TSET_0 (0 << 0)
-#define FSMC_TWAIT_6 (6 << 8)
-#define FSMC_THOLD_4 (4 << 16)
-#define FSMC_THIZ_1 (1 << 24)
-
/*
* There are 13 bytes of ecc for every 512 byte block in FSMC version 8
* and it has to be read consecutively and immediately after the 512
@@ -133,6 +121,20 @@ struct fsmc_eccplace {
struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES];
};
+struct fsmc_nand_timings {
+ uint8_t tclr;
+ uint8_t tar;
+ uint8_t thiz;
+ uint8_t thold;
+ uint8_t twait;
+ uint8_t tset;
+};
+
+enum access_mode {
+ USE_DMA_ACCESS = 1,
+ USE_WORD_ACCESS,
+};
+
/**
* fsmc_nand_platform_data - platform specific NAND controller config
* @partitions: partition table for the platform, use a default fallback
@@ -146,12 +148,23 @@ struct fsmc_eccplace {
* this may be set to NULL
*/
struct fsmc_nand_platform_data {
+ struct fsmc_nand_timings *nand_timings;
struct mtd_partition *partitions;
unsigned int nr_partitions;
unsigned int options;
unsigned int width;
unsigned int bank;
+
+ /* CLE, ALE offsets */
+ unsigned int cle_off;
+ unsigned int ale_off;
+ enum access_mode mode;
+
void (*select_bank)(uint32_t bank, uint32_t busw);
+
+ /* priv structures for dma accesses */
+ void *read_dma_priv;
+ void *write_dma_priv;
};
extern int __init fsmc_nor_init(struct platform_device *pdev,
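
A minimal sketch, not part of the patch: with the register structs replaced by offsets, a driver computes register addresses with FSMC_NAND_REG() and builds the PC value from the new shift/mask pairs. regs_va, bank and the timing values are assumed to come from the driver's probe path.

#include <linux/io.h>
#include <linux/mtd/fsmc.h>

static void my_enable_nand_bank(void __iomem *regs_va, unsigned int bank,
				const struct fsmc_nand_timings *tims)
{
	u32 pc = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON |
		 (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT |
		 (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;

	writel(pc, FSMC_NAND_REG(regs_va, bank, PC));
}
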
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index d43dc25af82e..cf5ea8cdcf8e 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -164,6 +164,9 @@ struct mtd_info {
/* ECC layout structure pointer - read only! */
struct nand_ecclayout *ecclayout;
+ /* max number of correctable bit errors per writesize */
+ unsigned int ecc_strength;
+
/* Data for variable erase regions. If numeraseregions is zero,
* it means that the whole device has erasesize as given above.
*/
@@ -174,52 +177,52 @@ struct mtd_info {
* Do not call via these pointers, use corresponding mtd_*()
* wrappers instead.
*/
- int (*erase) (struct mtd_info *mtd, struct erase_info *instr);
- int (*point) (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, void **virt, resource_size_t *phys);
- void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
- unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags);
- int (*read) (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
- int (*write) (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf);
- int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf);
- int (*read_oob) (struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops);
- int (*write_oob) (struct mtd_info *mtd, loff_t to,
+ int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
+ int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+ int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
+ unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
+ unsigned long len,
+ unsigned long offset,
+ unsigned long flags);
+ int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+ int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
+ int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
+ int (*_read_oob) (struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops);
- int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
- size_t len);
- int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen, u_char *buf);
- int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
- size_t len);
- int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen, u_char *buf);
- int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, u_char *buf);
- int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
- size_t len);
- int (*writev) (struct mtd_info *mtd, const struct kvec *vecs,
+ int (*_write_oob) (struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops);
+ int (*_get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+ size_t len);
+ int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf);
+ int (*_get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+ size_t len);
+ int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf);
+ int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, u_char *buf);
+ int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len);
+ int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
- void (*sync) (struct mtd_info *mtd);
- int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
- int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
- int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
- int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
- int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
- int (*suspend) (struct mtd_info *mtd);
- void (*resume) (struct mtd_info *mtd);
+ void (*_sync) (struct mtd_info *mtd);
+ int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+ int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+ int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+ int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
+ int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
+ int (*_suspend) (struct mtd_info *mtd);
+ void (*_resume) (struct mtd_info *mtd);
/*
* If the driver is something smart, like UBI, it may need to maintain
* its own reference counting. The below functions are only for driver.
*/
- int (*get_device) (struct mtd_info *mtd);
- void (*put_device) (struct mtd_info *mtd);
+ int (*_get_device) (struct mtd_info *mtd);
+ void (*_put_device) (struct mtd_info *mtd);
/* Backing device capabilities for this device
* - provides mmap capabilities
@@ -240,214 +243,75 @@ struct mtd_info {
int usecount;
};
-/*
- * Erase is an asynchronous operation. Device drivers are supposed
- * to call instr->callback() whenever the operation completes, even
- * if it completes with a failure.
- * Callers are supposed to pass a callback function and wait for it
- * to be called before writing to the block.
- */
-static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- return mtd->erase(mtd, instr);
-}
-
-/*
- * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
- */
-static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, void **virt, resource_size_t *phys)
-{
- *retlen = 0;
- if (!mtd->point)
- return -EOPNOTSUPP;
- return mtd->point(mtd, from, len, retlen, virt, phys);
-}
-
-/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
-static inline void mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
-{
- return mtd->unpoint(mtd, from, len);
-}
-
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- if (!mtd->get_unmapped_area)
- return -EOPNOTSUPP;
- return mtd->get_unmapped_area(mtd, len, offset, flags);
-}
-
-static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- return mtd->read(mtd, from, len, retlen, buf);
-}
-
-static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- *retlen = 0;
- if (!mtd->write)
- return -EROFS;
- return mtd->write(mtd, to, len, retlen, buf);
-}
-
-/*
- * In blackbox flight recorder like scenarios we want to make successful writes
- * in interrupt context. panic_write() is only intended to be called when its
- * known the kernel is about to panic and we need the write to succeed. Since
- * the kernel is not going to be running for much longer, this function can
- * break locks and delay to ensure the write succeeds (but not sleep).
- */
-static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- *retlen = 0;
- if (!mtd->panic_write)
- return -EOPNOTSUPP;
- return mtd->panic_write(mtd, to, len, retlen, buf);
-}
+int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ void **virt, resource_size_t *phys);
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+ unsigned long offset, unsigned long flags);
+int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ u_char *buf);
+int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf);
+int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf);
static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
ops->retlen = ops->oobretlen = 0;
- if (!mtd->read_oob)
+ if (!mtd->_read_oob)
return -EOPNOTSUPP;
- return mtd->read_oob(mtd, from, ops);
+ return mtd->_read_oob(mtd, from, ops);
}
static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
ops->retlen = ops->oobretlen = 0;
- if (!mtd->write_oob)
- return -EOPNOTSUPP;
- return mtd->write_oob(mtd, to, ops);
-}
-
-/*
- * Method to access the protection register area, present in some flash
- * devices. The user data is one time programmable but the factory data is read
- * only.
- */
-static inline int mtd_get_fact_prot_info(struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
-{
- if (!mtd->get_fact_prot_info)
+ if (!mtd->_write_oob)
return -EOPNOTSUPP;
- return mtd->get_fact_prot_info(mtd, buf, len);
-}
-
-static inline int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen,
- u_char *buf)
-{
- *retlen = 0;
- if (!mtd->read_fact_prot_reg)
- return -EOPNOTSUPP;
- return mtd->read_fact_prot_reg(mtd, from, len, retlen, buf);
-}
-
-static inline int mtd_get_user_prot_info(struct mtd_info *mtd,
- struct otp_info *buf,
- size_t len)
-{
- if (!mtd->get_user_prot_info)
- return -EOPNOTSUPP;
- return mtd->get_user_prot_info(mtd, buf, len);
-}
-
-static inline int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen,
- u_char *buf)
-{
- *retlen = 0;
- if (!mtd->read_user_prot_reg)
- return -EOPNOTSUPP;
- return mtd->read_user_prot_reg(mtd, from, len, retlen, buf);
-}
-
-static inline int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to,
- size_t len, size_t *retlen,
- u_char *buf)
-{
- *retlen = 0;
- if (!mtd->write_user_prot_reg)
- return -EOPNOTSUPP;
- return mtd->write_user_prot_reg(mtd, to, len, retlen, buf);
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ return mtd->_write_oob(mtd, to, ops);
}
-static inline int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len)
-{
- if (!mtd->lock_user_prot_reg)
- return -EOPNOTSUPP;
- return mtd->lock_user_prot_reg(mtd, from, len);
-}
+int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
+ size_t len);
+int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
+ size_t len);
+int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, u_char *buf);
+int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
static inline void mtd_sync(struct mtd_info *mtd)
{
- if (mtd->sync)
- mtd->sync(mtd);
-}
-
-/* Chip-supported device locking */
-static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- if (!mtd->lock)
- return -EOPNOTSUPP;
- return mtd->lock(mtd, ofs, len);
+ if (mtd->_sync)
+ mtd->_sync(mtd);
}
-static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- if (!mtd->unlock)
- return -EOPNOTSUPP;
- return mtd->unlock(mtd, ofs, len);
-}
-
-static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- if (!mtd->is_locked)
- return -EOPNOTSUPP;
- return mtd->is_locked(mtd, ofs, len);
-}
+int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
+int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
static inline int mtd_suspend(struct mtd_info *mtd)
{
- return mtd->suspend ? mtd->suspend(mtd) : 0;
+ return mtd->_suspend ? mtd->_suspend(mtd) : 0;
}
static inline void mtd_resume(struct mtd_info *mtd)
{
- if (mtd->resume)
- mtd->resume(mtd);
-}
-
-static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
-{
- if (!mtd->block_isbad)
- return 0;
- return mtd->block_isbad(mtd, ofs);
-}
-
-static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
- if (!mtd->block_markbad)
- return -EOPNOTSUPP;
- return mtd->block_markbad(mtd, ofs);
+ if (mtd->_resume)
+ mtd->_resume(mtd);
}
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
@@ -482,12 +346,12 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
static inline int mtd_has_oob(const struct mtd_info *mtd)
{
- return mtd->read_oob && mtd->write_oob;
+ return mtd->_read_oob && mtd->_write_oob;
}
static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
- return !!mtd->block_isbad;
+ return !!mtd->_block_isbad;
}
/* Kernel-side ioctl definitions */
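
The mtd.h hunk above turns most of the inline wrappers into exported functions and makes mtd_write_oob() honour MTD_WRITEABLE. As a rough, hedged sketch of what a caller sees after this change (the helper name, buffer handling and messages below are invented; only mtd_write_oob() and struct mtd_oob_ops are from mainline):

	/*
	 * Hedged sketch, not part of the patch: write an OOB buffer and
	 * distinguish the two new failure modes of mtd_write_oob().
	 */
	#include <linux/mtd/mtd.h>

	static int example_write_oob(struct mtd_info *mtd, loff_t to,
				     u8 *oobbuf, size_t ooblen)
	{
		struct mtd_oob_ops ops = {
			.mode	= MTD_OPS_PLACE_OOB,	/* fixed OOB offset */
			.ooblen	= ooblen,
			.oobbuf	= oobbuf,
		};
		int ret;

		ret = mtd_write_oob(mtd, to, &ops);
		if (ret == -EOPNOTSUPP)		/* no _write_oob method */
			pr_info("OOB writes not supported\n");
		else if (ret == -EROFS)		/* MTD_WRITEABLE not set */
			pr_info("device is read-only\n");
		return ret;
	}
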
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 63b5a8b6dfbd..1482340d3d9f 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -324,6 +324,7 @@ struct nand_hw_control {
* @steps: number of ECC steps per page
* @size: data bytes per ECC step
* @bytes: ECC bytes per step
+ * @strength:	max number of correctable bits per ECC step
* @total: total number of ECC bytes per page
* @prepad: padding information for syndrome based ECC generators
* @postpad: padding information for syndrome based ECC generators
@@ -351,6 +352,7 @@ struct nand_ecc_ctrl {
int size;
int bytes;
int total;
+ int strength;
int prepad;
int postpad;
struct nand_ecclayout *layout;
@@ -448,8 +450,9 @@ struct nand_buffers {
* will be copied to the appropriate nand_bbt_descr's.
* @badblockpos: [INTERN] position of the bad block marker in the oob
* area.
- * @badblockbits: [INTERN] number of bits to left-shift the bad block
- * number
+ * @badblockbits: [INTERN] minimum number of set bits in a good block's
+ * bad block marker position; i.e., BBM == 11110111b is
+ * not bad when badblockbits == 7
* @cellinfo: [INTERN] MLC/multichip data from chip ident
* @numchips: [INTERN] number of physical chips
* @chipsize: [INTERN] the size of one chip for multichip arrays
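
The new ecc.strength field lets a NAND driver state how many bit errors each ECC step can correct. A minimal, hypothetical driver fragment (values chosen for the classic 1-bit software Hamming code over 256-byte steps; not taken from any specific driver) might fill it in like this:

	/* Hedged example: populating the new field during chip setup. */
	#include <linux/mtd/nand.h>

	static void example_setup_ecc(struct nand_chip *chip)
	{
		chip->ecc.mode     = NAND_ECC_SOFT;	/* software Hamming ECC */
		chip->ecc.size     = 256;		/* data bytes per ECC step */
		chip->ecc.bytes    = 3;			/* ECC bytes per step */
		chip->ecc.strength = 1;			/* 1 correctable bit per step */
	}
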
diff --git a/include/linux/mtd/pmc551.h b/include/linux/mtd/pmc551.h
deleted file mode 100644
index 27ad40aed19f..000000000000
--- a/include/linux/mtd/pmc551.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * PMC551 PCI Mezzanine Ram Device
- *
- * Author:
- * Mark Ferrell
- * Copyright 1999,2000 Nortel Networks
- *
- * License:
- * As part of this driver was derrived from the slram.c driver it falls
- * under the same license, which is GNU General Public License v2
- */
-
-#ifndef __MTD_PMC551_H__
-#define __MTD_PMC551_H__
-
-#include <linux/mtd/mtd.h>
-
-#define PMC551_VERSION \
- "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
-
-/*
- * Our personal and private information
- */
-struct mypriv {
- struct pci_dev *dev;
- u_char *start;
- u32 base_map0;
- u32 curr_map0;
- u32 asize;
- struct mtd_info *nextpmc551;
-};
-
-/*
- * Function Prototypes
- */
-static int pmc551_erase(struct mtd_info *, struct erase_info *);
-static void pmc551_unpoint(struct mtd_info *, loff_t, size_t);
-static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, void **virt, resource_size_t *phys);
-static int pmc551_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
-static int pmc551_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
-
-
-/*
- * Define the PCI ID's if the kernel doesn't define them for us
- */
-#ifndef PCI_VENDOR_ID_V3_SEMI
-#define PCI_VENDOR_ID_V3_SEMI 0x11b0
-#endif
-
-#ifndef PCI_DEVICE_ID_V3_SEMI_V370PDC
-#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
-#endif
-
-
-#define PMC551_PCI_MEM_MAP0 0x50
-#define PMC551_PCI_MEM_MAP1 0x54
-#define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000
-#define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0
-#define PMC551_PCI_MEM_MAP_REG_EN 0x00000002
-#define PMC551_PCI_MEM_MAP_ENABLE 0x00000001
-
-#define PMC551_SDRAM_MA 0x60
-#define PMC551_SDRAM_CMD 0x62
-#define PMC551_DRAM_CFG 0x64
-#define PMC551_SYS_CTRL_REG 0x78
-
-#define PMC551_DRAM_BLK0 0x68
-#define PMC551_DRAM_BLK1 0x6c
-#define PMC551_DRAM_BLK2 0x70
-#define PMC551_DRAM_BLK3 0x74
-#define PMC551_DRAM_BLK_GET_SIZE(x) (524288<<((x>>4)&0x0f))
-#define PMC551_DRAM_BLK_SET_COL_MUX(x,v) (((x) & ~0x00007000) | (((v) & 0x7) << 12))
-#define PMC551_DRAM_BLK_SET_ROW_MUX(x,v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
-
-
-#endif /* __MTD_PMC551_H__ */
-
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index 9cf4c4c79555..a38e1fa8af01 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -23,6 +23,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/pm_qos.h>
/* FLCTL registers */
#define FLCMNCR(f) (f->reg + 0x0)
@@ -38,6 +39,7 @@
#define FLDTFIFO(f) (f->reg + 0x24)
#define FLECFIFO(f) (f->reg + 0x28)
#define FLTRCR(f) (f->reg + 0x2C)
+#define FLHOLDCR(f) (f->reg + 0x38)
#define FL4ECCRESULT0(f) (f->reg + 0x80)
#define FL4ECCRESULT1(f) (f->reg + 0x84)
#define FL4ECCRESULT2(f) (f->reg + 0x88)
@@ -67,6 +69,30 @@
#define CE0_ENABLE (0x1 << 3) /* Chip Enable 0 */
#define TYPESEL_SET (0x1 << 0)
+/*
+ * Clock settings using the PULSEx registers from FLCMNCR
+ *
+ * Some hardware uses bits called PULSEx instead of FCKSEL_E and QTSEL_E
+ * to control the clock divider used between the High-Speed Peripheral Clock
+ * and the FLCTL internal clock. If so, use CLK_8B_xxx when connecting 8 bit
+ * and CLK_16B_xxx when connecting 16 bit bus width NAND chips. For the 16 bit
+ * version the divider is separate for the pulse width of high and low
+ * signals.
+ */
+#define PULSE3 (0x1 << 27)
+#define PULSE2 (0x1 << 17)
+#define PULSE1 (0x1 << 15)
+#define PULSE0 (0x1 << 9)
+#define CLK_8B_0_5 PULSE1
+#define CLK_8B_1 0x0
+#define CLK_8B_1_5 (PULSE1 | PULSE2)
+#define CLK_8B_2 PULSE0
+#define CLK_8B_3 (PULSE0 | PULSE1 | PULSE2)
+#define CLK_8B_4 (PULSE0 | PULSE2)
+#define CLK_16B_6L_2H PULSE0
+#define CLK_16B_9L_3H (PULSE0 | PULSE1 | PULSE2)
+#define CLK_16B_12L_4H (PULSE0 | PULSE2)
+
/* FLCMDCR control bits */
#define ADRCNT2_E (0x1 << 31) /* 5byte address enable */
#define ADRMD_E (0x1 << 26) /* Sector address access */
@@ -85,6 +111,15 @@
#define TRSTRT (0x1 << 0) /* translation start */
#define TREND (0x1 << 1) /* translation end */
+/*
+ * FLHOLDCR control bits
+ *
+ * HOLDEN: Bus Occupancy Enable (inverted)
+ * Enable this bit when the external bus might be used in between transfers.
+ * If not set and the bus gets used by other modules, a deadlock occurs.
+ */
+#define HOLDEN (0x1 << 0)
+
/* FL4ECCCR control bits */
#define _4ECCFA (0x1 << 2) /* 4 symbols correct fault */
#define _4ECCEND (0x1 << 1) /* 4 symbols end */
@@ -97,6 +132,7 @@ struct sh_flctl {
struct mtd_info mtd;
struct nand_chip chip;
struct platform_device *pdev;
+ struct dev_pm_qos_request pm_qos;
void __iomem *reg;
uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
@@ -108,11 +144,14 @@ struct sh_flctl {
int erase1_page_addr; /* page_addr in ERASE1 cmd */
uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */
uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */
+ uint32_t flcmncr_base; /* base value of FLCMNCR */
int hwecc_cant_correct[4];
unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */
unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */
+ unsigned holden:1; /* Hardware has FLHOLDCR and HOLDEN is set */
+ unsigned qos_request:1; /* QoS request to prevent deep power shutdown */
};
struct sh_flctl_platform_data {
@@ -121,6 +160,7 @@ struct sh_flctl_platform_data {
unsigned long flcmncr_val;
unsigned has_hwecc:1;
+ unsigned use_holden:1;
};
static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
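
The PULSEx clock macros, FLHOLDCR handling and the new use_holden flag are consumed through sh_flctl_platform_data. A hedged board-code sketch (not from the patch; only macros visible in the header above are used, and a real board would also pick its own FLCMNCR bits and partitions):

	#include <linux/mtd/sh_flctl.h>

	static struct sh_flctl_platform_data example_flctl_pdata = {
		.flcmncr_val = CLK_8B_4 | CE0_ENABLE | TYPESEL_SET,
		.has_hwecc   = 1,
		.use_holden  = 1,	/* set HOLDEN: the external bus is shared */
	};
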
diff --git a/include/linux/mtd/spear_smi.h b/include/linux/mtd/spear_smi.h
new file mode 100644
index 000000000000..8ae1726044c3
--- /dev/null
+++ b/include/linux/mtd/spear_smi.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2010 ST Microelectronics
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MTD_SPEAR_SMI_H
+#define __MTD_SPEAR_SMI_H
+
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+/* max possible slots for serial-nor flash chip in the SMI controller */
+#define MAX_NUM_FLASH_CHIP 4
+
+/* macro to define partitions for flash devices */
+#define DEFINE_PARTS(n, of, s) \
+{ \
+ .name = n, \
+ .offset = of, \
+ .size = s, \
+}
+
+/**
+ * struct spear_smi_flash_info - platform structure for passing flash
+ * information
+ *
+ * name: name of the serial nor flash for identification
+ * mem_base: the memory base on which the flash is mapped
+ * size: size of the flash in bytes
+ * partitions: partition details
+ * nr_partitions: number of partitions
+ * fast_mode: whether flash supports fast mode
+ */
+
+struct spear_smi_flash_info {
+ char *name;
+ unsigned long mem_base;
+ unsigned long size;
+ struct mtd_partition *partitions;
+ int nr_partitions;
+ u8 fast_mode;
+};
+
+/**
+ * struct spear_smi_plat_data - platform structure for configuring smi
+ *
+ * clk_rate: clk rate at which SMI must operate
+ * num_flashes: number of flashes present on board
+ * board_flash_info: specific details of each flash present on board
+ */
+struct spear_smi_plat_data {
+ unsigned long clk_rate;
+ int num_flashes;
+ struct spear_smi_flash_info *board_flash_info;
+ struct device_node *np[MAX_NUM_FLASH_CHIP];
+};
+
+#endif /* __MTD_SPEAR_SMI_H */
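
A hedged example of how board code could describe one serial NOR chip to the SMI driver (flash name, base address, sizes and partition layout below are invented for illustration):

	#include <linux/kernel.h>
	#include <linux/mtd/spear_smi.h>

	static struct mtd_partition example_parts[] = {
		DEFINE_PARTS("xloader", 0x00000000, 0x00010000),
		DEFINE_PARTS("rootfs",  0x00010000, 0x007f0000),
	};

	static struct spear_smi_flash_info example_flash[] = {
		{
			.name		= "m25p64",
			.mem_base	= 0xf8000000,	/* board-specific mapping */
			.size		= 0x00800000,	/* 8 MiB */
			.partitions	= example_parts,
			.nr_partitions	= ARRAY_SIZE(example_parts),
			.fast_mode	= 1,
		},
	};

	static struct spear_smi_plat_data example_smi_pdata = {
		.clk_rate	  = 50000000,	/* 50 MHz */
		.num_flashes	  = ARRAY_SIZE(example_flash),
		.board_flash_info = example_flash,
	};
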
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 834df8bf08b6..0987146b0637 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -438,7 +438,20 @@ enum limit_by4 {
enum open_delegation_type4 {
NFS4_OPEN_DELEGATE_NONE = 0,
NFS4_OPEN_DELEGATE_READ = 1,
- NFS4_OPEN_DELEGATE_WRITE = 2
+ NFS4_OPEN_DELEGATE_WRITE = 2,
+ NFS4_OPEN_DELEGATE_NONE_EXT = 3, /* 4.1 */
+};
+
+enum why_no_delegation4 { /* new to v4.1 */
+ WND4_NOT_WANTED = 0,
+ WND4_CONTENTION = 1,
+ WND4_RESOURCE = 2,
+ WND4_NOT_SUPP_FTYPE = 3,
+ WND4_WRITE_DELEG_NOT_SUPP_FTYPE = 4,
+ WND4_NOT_SUPP_UPGRADE = 5,
+ WND4_NOT_SUPP_DOWNGRADE = 6,
+ WND4_CANCELLED = 7,
+ WND4_IS_DIR = 8,
};
enum lock_type4 {
diff --git a/include/linux/nfsd/cld.h b/include/linux/nfsd/cld.h
new file mode 100644
index 000000000000..f14a9ab06f1f
--- /dev/null
+++ b/include/linux/nfsd/cld.h
@@ -0,0 +1,56 @@
+/*
+ * Upcall description for nfsdcld communication
+ *
+ * Copyright (c) 2012 Red Hat, Inc.
+ * Author(s): Jeff Layton <jlayton@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _NFSD_CLD_H
+#define _NFSD_CLD_H
+
+/* latest upcall version available */
+#define CLD_UPCALL_VERSION 1
+
+/* defined by RFC3530 */
+#define NFS4_OPAQUE_LIMIT 1024
+
+enum cld_command {
+ Cld_Create, /* create a record for this cm_id */
+ Cld_Remove, /* remove record of this cm_id */
+ Cld_Check, /* is this cm_id allowed? */
+ Cld_GraceDone, /* grace period is complete */
+};
+
+/* representation of long-form NFSv4 client ID */
+struct cld_name {
+ uint16_t cn_len; /* length of cm_id */
+ unsigned char cn_id[NFS4_OPAQUE_LIMIT]; /* client-provided */
+} __attribute__((packed));
+
+/* message struct for communication with userspace */
+struct cld_msg {
+ uint8_t cm_vers; /* upcall version */
+ uint8_t cm_cmd; /* upcall command */
+ int16_t cm_status; /* return code */
+ uint32_t cm_xid; /* transaction id */
+ union {
+ int64_t cm_gracetime; /* grace period start time */
+ struct cld_name cm_name;
+ } __attribute__((packed)) cm_u;
+} __attribute__((packed));
+
+#endif /* !_NFSD_CLD_H */
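
To illustrate the upcall format, a hedged sketch of how knfsd might fill in a Cld_Create message before handing it to the nfsdcld pipe (the helper name and the transaction-id source are invented; the field layout is the one defined above):

	#include <linux/string.h>
	#include <linux/nfsd/cld.h>

	static void example_fill_create(struct cld_msg *cmsg,
					const unsigned char *id, uint16_t len)
	{
		memset(cmsg, 0, sizeof(*cmsg));
		cmsg->cm_vers = CLD_UPCALL_VERSION;
		cmsg->cm_cmd  = Cld_Create;
		cmsg->cm_xid  = 1;			/* caller-chosen transaction id */
		cmsg->cm_u.cm_name.cn_len = len;	/* len <= NFS4_OPAQUE_LIMIT */
		memcpy(cmsg->cm_u.cm_name.cn_id, id, len);
	}
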
diff --git a/arch/arm/mach-zynq/include/mach/io.h b/include/linux/platform_data/spear_thermal.h
index 39d9885e0e9a..724f2e1cbbcb 100644
--- a/arch/arm/mach-zynq/include/mach/io.h
+++ b/include/linux/platform_data/spear_thermal.h
@@ -1,6 +1,8 @@
-/* arch/arm/mach-zynq/include/mach/io.h
+/*
+ * SPEAr thermal driver platform data.
*
- * Copyright (C) 2011 Xilinx
+ * Copyright (C) 2011-2012 ST Microelectronics
+ * Author: Vincenzo Frascino <vincenzo.frascino@st.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -10,24 +12,15 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
*/
+#ifndef SPEAR_THERMAL_H
+#define SPEAR_THERMAL_H
-#ifndef __MACH_IO_H__
-#define __MACH_IO_H__
-
-/* Allow IO space to be anywhere in the memory */
-
-#define IO_SPACE_LIMIT 0xffff
-
-/* IO address mapping macros, nothing special at this time but required */
-
-#ifdef __ASSEMBLER__
-#define IOMEM(x) (x)
-#else
-#define IOMEM(x) ((void __force __iomem *)(x))
-#endif
-
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
+/* SPEAr Thermal Sensor Platform Data */
+struct spear_thermal_pdata {
+ /* flags used to enable thermal sensor */
+ unsigned int thermal_flags;
+};
-#endif
+#endif /* SPEAR_THERMAL_H */
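
The structure carries only an enable mask; a hedged board-code fragment (the flag value is a placeholder, not taken from any SoC manual) would look like:

	static struct spear_thermal_pdata example_thermal_pdata = {
		.thermal_flags = 0x7000,	/* board/SoC-specific enable bits */
	};
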
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index fe99211fb2b8..e01b167e66f0 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -27,6 +27,8 @@
#define MAX17042_BATTERY_FULL (100)
#define MAX17042_DEFAULT_SNS_RESISTOR (10000)
+#define MAX17042_CHARACTERIZATION_DATA_SIZE 48
+
enum max17042_register {
MAX17042_STATUS = 0x00,
MAX17042_VALRT_Th = 0x01,
@@ -40,11 +42,11 @@ enum max17042_register {
MAX17042_VCELL = 0x09,
MAX17042_Current = 0x0A,
MAX17042_AvgCurrent = 0x0B,
- MAX17042_Qresidual = 0x0C,
+
MAX17042_SOC = 0x0D,
MAX17042_AvSOC = 0x0E,
MAX17042_RemCap = 0x0F,
- MAX17402_FullCAP = 0x10,
+ MAX17042_FullCAP = 0x10,
MAX17042_TTE = 0x11,
MAX17042_V_empty = 0x12,
@@ -62,14 +64,14 @@ enum max17042_register {
MAX17042_AvCap = 0x1F,
MAX17042_ManName = 0x20,
MAX17042_DevName = 0x21,
- MAX17042_DevChem = 0x22,
+ MAX17042_FullCAPNom = 0x23,
MAX17042_TempNom = 0x24,
- MAX17042_TempCold = 0x25,
+ MAX17042_TempLim = 0x25,
MAX17042_TempHot = 0x26,
MAX17042_AIN = 0x27,
MAX17042_LearnCFG = 0x28,
- MAX17042_SHFTCFG = 0x29,
+ MAX17042_FilterCFG = 0x29,
MAX17042_RelaxCFG = 0x2A,
MAX17042_MiscCFG = 0x2B,
MAX17042_TGAIN = 0x2C,
@@ -77,22 +79,41 @@ enum max17042_register {
MAX17042_CGAIN = 0x2E,
MAX17042_COFF = 0x2F,
- MAX17042_Q_empty = 0x33,
+ MAX17042_MaskSOC = 0x32,
+ MAX17042_SOC_empty = 0x33,
MAX17042_T_empty = 0x34,
+ MAX17042_FullCAP0 = 0x35,
+ MAX17042_LAvg_empty = 0x36,
+ MAX17042_FCTC = 0x37,
MAX17042_RCOMP0 = 0x38,
MAX17042_TempCo = 0x39,
- MAX17042_Rx = 0x3A,
- MAX17042_T_empty0 = 0x3B,
+ MAX17042_EmptyTempCo = 0x3A,
+ MAX17042_K_empty0 = 0x3B,
MAX17042_TaskPeriod = 0x3C,
MAX17042_FSTAT = 0x3D,
MAX17042_SHDNTIMER = 0x3F,
- MAX17042_VFRemCap = 0x4A,
+ MAX17042_dQacc = 0x45,
+ MAX17042_dPacc = 0x46,
+
+ MAX17042_VFSOC0 = 0x48,
MAX17042_QH = 0x4D,
MAX17042_QL = 0x4E,
+
+ MAX17042_VFSOC0Enable = 0x60,
+ MAX17042_MLOCKReg1 = 0x62,
+ MAX17042_MLOCKReg2 = 0x63,
+
+ MAX17042_MODELChrTbl = 0x80,
+
+ MAX17042_OCV = 0xEE,
+
+ MAX17042_OCVInternal = 0xFB,
+
+ MAX17042_VFSOC = 0xFF,
};
/*
@@ -105,10 +126,64 @@ struct max17042_reg_data {
u16 data;
};
+struct max17042_config_data {
+ /* External current sense resistor value in milli-ohms */
+ u32 cur_sense_val;
+
+ /* A/D measurement */
+ u16 tgain; /* 0x2C */
+ u16 toff; /* 0x2D */
+ u16 cgain; /* 0x2E */
+ u16 coff; /* 0x2F */
+
+ /* Alert / Status */
+ u16 valrt_thresh; /* 0x01 */
+ u16 talrt_thresh; /* 0x02 */
+ u16 soc_alrt_thresh; /* 0x03 */
+ u16 config; /* 0x01D */
+ u16 shdntimer; /* 0x03F */
+
+ /* App data */
+ u16 design_cap; /* 0x18 */
+ u16 ichgt_term; /* 0x1E */
+
+ /* MG3 config */
+ u16 at_rate; /* 0x04 */
+ u16 learn_cfg; /* 0x28 */
+ u16 filter_cfg; /* 0x29 */
+ u16 relax_cfg; /* 0x2A */
+ u16 misc_cfg; /* 0x2B */
+ u16 masksoc; /* 0x32 */
+
+ /* MG3 save and restore */
+ u16 fullcap; /* 0x10 */
+ u16 fullcapnom; /* 0x23 */
+ u16 socempty; /* 0x33 */
+ u16 lavg_empty; /* 0x36 */
+ u16 dqacc; /* 0x45 */
+ u16 dpacc; /* 0x46 */
+
+ /* Cell technology from power_supply.h */
+ u16 cell_technology;
+
+ /* Cell Data */
+ u16 vempty; /* 0x12 */
+ u16 temp_nom; /* 0x24 */
+ u16 temp_lim; /* 0x25 */
+ u16 fctc; /* 0x37 */
+ u16 rcomp0; /* 0x38 */
+ u16 tcompc0; /* 0x39 */
+ u16 empty_tempco; /* 0x3A */
+ u16 kempty0; /* 0x3B */
+ u16 cell_char_tbl[MAX17042_CHARACTERIZATION_DATA_SIZE];
+} __packed;
+
struct max17042_platform_data {
struct max17042_reg_data *init_data;
+ struct max17042_config_data *config_data;
int num_init_data; /* Number of enties in init_data array */
bool enable_current_sense;
+ bool enable_por_init; /* Use POR init from Maxim appnote */
/*
* R_sns in micro-ohms.
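
A hedged example of wiring the new POR-init path into board code (register values below are placeholders, not a real battery characterisation; a real board takes them from Maxim's fuel-gauge tools):

	#include <linux/power/max17042_battery.h>

	static struct max17042_config_data example_fg_config = {
		.cur_sense_val = 10,		/* 10 mOhm external sense resistor */
		.design_cap    = 3000,		/* placeholder, register 0x18 */
		.ichgt_term    = 0x0240,	/* placeholder, register 0x1E */
		/* .cell_char_tbl[] would carry the 48-word characterisation data */
	};

	static struct max17042_platform_data example_fg_pdata = {
		.config_data	      = &example_fg_config,
		.enable_current_sense = true,
		.enable_por_init      = true,	/* run the appnote POR sequence */
	};
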
diff --git a/include/linux/power/smb347-charger.h b/include/linux/power/smb347-charger.h
new file mode 100644
index 000000000000..b3cb20dab55f
--- /dev/null
+++ b/include/linux/power/smb347-charger.h
@@ -0,0 +1,117 @@
+/*
+ * Summit Microelectronics SMB347 Battery Charger Driver
+ *
+ * Copyright (C) 2011, Intel Corporation
+ *
+ * Authors: Bruce E. Robertson <bruce.e.robertson@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SMB347_CHARGER_H
+#define SMB347_CHARGER_H
+
+#include <linux/types.h>
+#include <linux/power_supply.h>
+
+enum {
+ /* use the default compensation method */
+ SMB347_SOFT_TEMP_COMPENSATE_DEFAULT = -1,
+
+ SMB347_SOFT_TEMP_COMPENSATE_NONE,
+ SMB347_SOFT_TEMP_COMPENSATE_CURRENT,
+ SMB347_SOFT_TEMP_COMPENSATE_VOLTAGE,
+};
+
+/* Use default factory programmed value for hard/soft temperature limit */
+#define SMB347_TEMP_USE_DEFAULT -273
+
+/*
+ * Charging enable can be controlled by software (via i2c) by
+ * smb347-charger driver or by EN pin (active low/high).
+ */
+enum smb347_chg_enable {
+ SMB347_CHG_ENABLE_SW,
+ SMB347_CHG_ENABLE_PIN_ACTIVE_LOW,
+ SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH,
+};
+
+/**
+ * struct smb347_charger_platform_data - platform data for SMB347 charger
+ * @battery_info: Information about the battery
+ * @max_charge_current: maximum current (in uA) the battery can be charged
+ * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
+ * @pre_charge_current: current (in uA) to use in pre-charging phase
+ * @termination_current: current (in uA) used to determine when the
+ * charging cycle terminates
+ * @pre_to_fast_voltage: voltage (in uV) threshold used for transitioning from
+ * pre-charge to fast charge mode
+ * @mains_current_limit: maximum input current drawn from AC/DC input (in uA)
+ * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB
+ * input
+ * @chip_temp_threshold: die temperature where device starts limiting charge
+ * current [%100 - %130] (in degree C)
+ * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C),
+ * granularity is 5 deg C.
+ * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree C),
+ * granularity is 5 deg C.
+ * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C),
+ * granularity is 5 deg C.
+ * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C),
+ * granularity is 5 deg C.
+ * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit
+ * @soft_temp_limit_compensation: compensation method when soft temperature
+ * limit is hit
+ * @charge_current_compensation: current (in uA) for charging compensation
+ * current when temperature hits soft limits
+ * @use_mains: AC/DC input can be used
+ * @use_usb: USB input can be used
+ * @use_usb_otg: USB OTG output can be used (not implemented yet)
+ * @irq_gpio: GPIO number used for interrupts (%-1 if not used)
+ * @enable_control: how charging enable/disable is controlled
+ * (driver/pin controls)
+ *
+ * @use_mains, @use_usb, and @use_usb_otg are means to enable/disable
+ * hardware support for these. This is useful, for example, when OTG
+ * charging is controlled by the OTG transceiver driver rather than by
+ * the SMB347 hardware.
+ *
+ * Hard and soft temperature limit values are given as described in the
+ * device data sheet and assuming NTC beta value is %3750. Even if this is
+ * not the case, these values should be used. They can be mapped to the
+ * corresponding NTC beta values with the help of table %2 in the data
+ * sheet. So for example if NTC beta is %3375 and we want to program hard
+ * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50.
+ *
+ * If zero value is given in any of the current and voltage values, the
+ * factory programmed default will be used. For soft/hard temperature
+ * values, pass in %SMB347_TEMP_USE_DEFAULT instead.
+ */
+struct smb347_charger_platform_data {
+ struct power_supply_info battery_info;
+ unsigned int max_charge_current;
+ unsigned int max_charge_voltage;
+ unsigned int pre_charge_current;
+ unsigned int termination_current;
+ unsigned int pre_to_fast_voltage;
+ unsigned int mains_current_limit;
+ unsigned int usb_hc_current_limit;
+ unsigned int chip_temp_threshold;
+ int soft_cold_temp_limit;
+ int soft_hot_temp_limit;
+ int hard_cold_temp_limit;
+ int hard_hot_temp_limit;
+ bool suspend_on_hard_temp_limit;
+ unsigned int soft_temp_limit_compensation;
+ unsigned int charge_current_compensation;
+ bool use_mains;
+ bool use_usb;
+ bool use_usb_otg;
+ int irq_gpio;
+ enum smb347_chg_enable enable_control;
+};
+
+#endif /* SMB347_CHARGER_H */
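
A hedged board-data example for an SMB347 charging from AC and USB under software control (all current and voltage limits are invented values; battery_info is omitted for brevity, and SMB347_TEMP_USE_DEFAULT keeps the factory temperature thresholds):

	#include <linux/power/smb347-charger.h>

	static struct smb347_charger_platform_data example_smb347_pdata = {
		.max_charge_current	= 1800000,	/* 1.8 A */
		.max_charge_voltage	= 4200000,	/* 4.2 V */
		.termination_current	= 100000,	/* 100 mA */
		.mains_current_limit	= 2000000,	/* 2 A from the adapter */
		.usb_hc_current_limit	= 500000,	/* 500 mA from USB */
		.soft_cold_temp_limit	= SMB347_TEMP_USE_DEFAULT,
		.soft_hot_temp_limit	= SMB347_TEMP_USE_DEFAULT,
		.hard_cold_temp_limit	= SMB347_TEMP_USE_DEFAULT,
		.hard_hot_temp_limit	= SMB347_TEMP_USE_DEFAULT,
		.use_mains		= true,
		.use_usb		= true,
		.irq_gpio		= -1,		/* no interrupt line wired */
		.enable_control		= SMB347_CHG_ENABLE_SW,
	};
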
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 93f4d035076b..fcabfb4873c8 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -202,7 +202,8 @@ struct rtc_device
struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
int pie_enabled;
struct work_struct irqwork;
-
+ /* Some hardware can't support UIE mode */
+ int uie_unsupported;
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
struct work_struct uie_task;
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index b160645f5599..6aed0805927f 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -3,6 +3,23 @@
#include <linux/ioport.h>
+#ifdef CONFIG_SUPERH
+#define INTC_NR_IRQS 512
+#else
+#define INTC_NR_IRQS 1024
+#endif
+
+/*
+ * Convert back and forth between INTEVT and IRQ values.
+ */
+#ifdef CONFIG_CPU_HAS_INTEVT
+#define evt2irq(evt) (((evt) >> 5) - 16)
+#define irq2evt(irq) (((irq) + 16) << 5)
+#else
+#define evt2irq(evt) (evt)
+#define irq2evt(irq) (irq)
+#endif
+
typedef unsigned char intc_enum;
struct intc_vect {
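
With CONFIG_CPU_HAS_INTEVT the macros above give a fixed mapping between exception vectors and Linux IRQ numbers: evt2irq(0x700) == (0x700 >> 5) - 16 == 40, and irq2evt(40) == (40 + 16) << 5 == 0x700. A hedged platform-resource fragment (the 0x700 vector is an arbitrary example):

	#include <linux/ioport.h>
	#include <linux/sh_intc.h>

	static struct resource example_irq_res = {
		.start = evt2irq(0x700),	/* IRQ 40 on INTEVT-capable CPUs */
		.end   = evt2irq(0x700),
		.flags = IORESOURCE_IRQ,
	};
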
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index e253ccd7a604..51df117abe46 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -67,7 +67,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif
-#ifdef CONFIG_INLINE_SPIN_UNLOCK
+#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index c14fe86dac59..0b8e3e6bdacf 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -190,7 +190,7 @@ extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int);
extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int);
extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int,
- u32, u64, u32);
+ __be32, __be64, u32);
extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *,
struct rpcrdma_msg *,
struct rpcrdma_msg *,
@@ -292,7 +292,7 @@ svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
if (wr_ary) {
rp_ary = (struct rpcrdma_write_array *)
&wr_ary->
- wc_array[wr_ary->wc_nchunks].wc_target.rs_length;
+ wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
goto found_it;
}
diff --git a/include/linux/sysinfo.h b/include/linux/sysinfo.h
new file mode 100644
index 000000000000..934335a22522
--- /dev/null
+++ b/include/linux/sysinfo.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SYSINFO_H
+#define _LINUX_SYSINFO_H
+
+#include <linux/types.h>
+
+#define SI_LOAD_SHIFT 16
+struct sysinfo {
+ __kernel_long_t uptime; /* Seconds since boot */
+ __kernel_ulong_t loads[3]; /* 1, 5, and 15 minute load averages */
+ __kernel_ulong_t totalram; /* Total usable main memory size */
+ __kernel_ulong_t freeram; /* Available memory size */
+ __kernel_ulong_t sharedram; /* Amount of shared memory */
+ __kernel_ulong_t bufferram; /* Memory used by buffers */
+ __kernel_ulong_t totalswap; /* Total swap space size */
+ __kernel_ulong_t freeswap; /* swap space still available */
+ __u16 procs; /* Number of current processes */
+ __u16 pad; /* Explicit padding for m68k */
+ __kernel_ulong_t totalhigh; /* Total high memory size */
+ __kernel_ulong_t freehigh; /* Available high memory size */
+ __u32 mem_unit; /* Memory unit size in bytes */
+ char _f[20-2*sizeof(__kernel_ulong_t)-sizeof(__u32)]; /* Padding: libc5 uses this.. */
+};
+
+#endif /* _LINUX_SYSINFO_H */
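
This is the structure that the sysinfo(2) syscall fills in, so total RAM in bytes is totalram * mem_unit. A small user-space C example (standard glibc API, not part of the patch):

	#include <stdio.h>
	#include <sys/sysinfo.h>

	int main(void)
	{
		struct sysinfo si;

		if (sysinfo(&si) != 0)
			return 1;
		printf("uptime: %ld s, total RAM: %llu bytes\n",
		       si.uptime,
		       (unsigned long long)si.totalram * si.mem_unit);
		return 0;
	}
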
diff --git a/include/linux/tboot.h b/include/linux/tboot.h
index 1dba6ee55203..c75128bed5fa 100644
--- a/include/linux/tboot.h
+++ b/include/linux/tboot.h
@@ -143,7 +143,6 @@ static inline int tboot_enabled(void)
extern void tboot_probe(void);
extern void tboot_shutdown(u32 shutdown_type);
-extern void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control);
extern struct acpi_table_header *tboot_get_dmar_table(
struct acpi_table_header *dmar_tbl);
extern int tboot_force_iommu(void);
diff --git a/include/linux/time.h b/include/linux/time.h
index b3061782dec3..33a92ead4d88 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -116,7 +116,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
extern void read_persistent_clock(struct timespec *ts);
extern void read_boot_clock(struct timespec *ts);
extern int update_persistent_clock(struct timespec now);
-extern int no_sync_cmos_clock __read_mostly;
void timekeeping_init(void);
extern int timekeeping_suspended;
@@ -256,6 +255,7 @@ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
a->tv_nsec = ns;
}
+
#endif /* __KERNEL__ */
#define NFDBITS __NFDBITS
diff --git a/include/linux/timex.h b/include/linux/timex.h
index b75e1864ed19..99bc88b1fc02 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -252,7 +252,7 @@ extern void ntp_clear(void);
/* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
extern u64 ntp_tick_length(void);
-extern void second_overflow(void);
+extern int second_overflow(unsigned long secs);
extern int do_adjtimex(struct timex *);
extern void hardpps(const struct timespec *, const struct timespec *);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 84f3001a568d..91b91e805673 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -6,6 +6,7 @@
#include <linux/writeback.h>
#include <linux/tracepoint.h>
+#include <trace/events/gfpflags.h>
struct btrfs_root;
struct btrfs_fs_info;
@@ -862,6 +863,49 @@ TRACE_EVENT(btrfs_setup_cluster,
__entry->size, __entry->max_size, __entry->bitmap)
);
+struct extent_state;
+TRACE_EVENT(alloc_extent_state,
+
+ TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP),
+
+ TP_ARGS(state, mask, IP),
+
+ TP_STRUCT__entry(
+ __field(struct extent_state *, state)
+ __field(gfp_t, mask)
+ __field(unsigned long, ip)
+ ),
+
+ TP_fast_assign(
+ __entry->state = state,
+ __entry->mask = mask,
+ __entry->ip = IP
+ ),
+
+ TP_printk("state=%p; mask = %s; caller = %pF", __entry->state,
+ show_gfp_flags(__entry->mask), (void *)__entry->ip)
+);
+
+TRACE_EVENT(free_extent_state,
+
+ TP_PROTO(struct extent_state *state, unsigned long IP),
+
+ TP_ARGS(state, IP),
+
+ TP_STRUCT__entry(
+ __field(struct extent_state *, state)
+ __field(unsigned long, ip)
+ ),
+
+ TP_fast_assign(
+ __entry->state = state,
+ __entry->ip = IP
+ ),
+
+ TP_printk(" state=%p; caller = %pF", __entry->state,
+ (void *)__entry->ip)
+);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
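
A hedged sketch of the call sites these tracepoints expect (mainline wires them up in fs/btrfs/extent_io.c; the functions below are illustrative and assume the extent_state_cache slab from that file):

	#include <linux/slab.h>
	#include <trace/events/btrfs.h>

	/* assumed to exist, as in fs/btrfs/extent_io.c */
	extern struct kmem_cache *extent_state_cache;

	static struct extent_state *example_alloc_state(gfp_t mask)
	{
		struct extent_state *state;

		state = kmem_cache_alloc(extent_state_cache, mask);
		if (state)
			trace_alloc_extent_state(state, mask, _RET_IP_);
		return state;
	}

	static void example_free_state(struct extent_state *state)
	{
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
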
diff --git a/ipc/compat.c b/ipc/compat.c
index 845a28738d3a..a6df704f521e 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -27,6 +27,7 @@
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
+#include <linux/ptrace.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
@@ -117,6 +118,7 @@ extern int sem_ctls[];
static inline int compat_ipc_parse_version(int *cmd)
{
+#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
int version = *cmd & IPC_64;
/* this is tricky: architectures that have support for the old
@@ -128,6 +130,10 @@ static inline int compat_ipc_parse_version(int *cmd)
*cmd &= ~IPC_64;
#endif
return version;
+#else
+ /* With the asm-generic APIs, we always use the 64-bit versions. */
+ return IPC_64;
+#endif
}
static inline int __get_compat_ipc64_perm(struct ipc64_perm *p64,
@@ -232,10 +238,9 @@ static inline int put_compat_semid_ds(struct semid64_ds *s,
return err;
}
-long compat_sys_semctl(int first, int second, int third, void __user *uptr)
+static long do_compat_semctl(int first, int second, int third, u32 pad)
{
union semun fourth;
- u32 pad;
int err, err2;
struct semid64_ds s64;
struct semid64_ds __user *up64;
@@ -243,10 +248,6 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
memset(&s64, 0, sizeof(s64));
- if (!uptr)
- return -EINVAL;
- if (get_user(pad, (u32 __user *) uptr))
- return -EFAULT;
if ((third & (~IPC_64)) == SETVAL)
fourth.val = (int) pad;
else
@@ -305,6 +306,18 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
return err;
}
+#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
+long compat_sys_semctl(int first, int second, int third, void __user *uptr)
+{
+ u32 pad;
+
+ if (!uptr)
+ return -EINVAL;
+ if (get_user(pad, (u32 __user *) uptr))
+ return -EFAULT;
+ return do_compat_semctl(first, second, third, pad);
+}
+
long compat_sys_msgsnd(int first, int second, int third, void __user *uptr)
{
struct compat_msgbuf __user *up = uptr;
@@ -353,6 +366,37 @@ long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
out:
return err;
}
+#else
+long compat_sys_semctl(int semid, int semnum, int cmd, int arg)
+{
+ return do_compat_semctl(semid, semnum, cmd, arg);
+}
+
+long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
+ size_t msgsz, int msgflg)
+{
+ compat_long_t mtype;
+
+ if (get_user(mtype, &msgp->mtype))
+ return -EFAULT;
+ return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
+}
+
+long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
+ size_t msgsz, long msgtyp, int msgflg)
+{
+ long err, mtype;
+
+ err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
+ if (err < 0)
+ goto out;
+
+ if (put_user(mtype, &msgp->mtype))
+ err = -EFAULT;
+ out:
+ return err;
+}
+#endif
static inline int get_compat_msqid64(struct msqid64_ds *m64,
struct compat_msqid64_ds __user *up64)
@@ -470,6 +514,7 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
return err;
}
+#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
void __user *uptr)
{
@@ -485,6 +530,19 @@ long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
uaddr = compat_ptr(third);
return put_user(raddr, uaddr);
}
+#else
+long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg)
+{
+ unsigned long ret;
+ long err;
+
+ err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret);
+ if (err)
+ return err;
+ force_successful_syscall_return();
+ return (long)ret;
+}
+#endif
static inline int get_compat_shmid64_ds(struct shmid64_ds *s64,
struct compat_shmid64_ds __user *up64)
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 5068e2a4e75f..2251882daf53 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -124,8 +124,8 @@ config INLINE_SPIN_LOCK_IRQSAVE
def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
ARCH_INLINE_SPIN_LOCK_IRQSAVE
-config INLINE_SPIN_UNLOCK
- def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK)
+config UNINLINE_SPIN_UNLOCK
+ bool
config INLINE_SPIN_UNLOCK_BH
def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 24e7cb0ba26a..3f9c97419f02 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -36,6 +36,7 @@ config PREEMPT_VOLUNTARY
config PREEMPT
bool "Preemptible Kernel (Low-Latency Desktop)"
select PREEMPT_COUNT
+ select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
help
This option reduces the latency of the kernel by making
all kernel code (that is not executing in a critical section)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f4ea4b6f3cf1..ed64ccac67c9 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1883,7 +1883,7 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
*/
int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
- int retval;
+ int retval = 0;
struct cgroup_subsys *ss, *failed_ss = NULL;
struct cgroup *oldcgrp;
struct cgroupfs_root *root = cgrp->root;
diff --git a/kernel/compat.c b/kernel/compat.c
index f346cedfe24d..74ff8498809a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -31,11 +31,10 @@
#include <asm/uaccess.h>
/*
- * Note that the native side is already converted to a timespec, because
- * that's what we want anyway.
+ * Get/set struct timeval with struct timespec on the native side
*/
-static int compat_get_timeval(struct timespec *o,
- struct compat_timeval __user *i)
+static int compat_get_timeval_convert(struct timespec *o,
+ struct compat_timeval __user *i)
{
long usec;
@@ -46,8 +45,8 @@ static int compat_get_timeval(struct timespec *o,
return 0;
}
-static int compat_put_timeval(struct compat_timeval __user *o,
- struct timeval *i)
+static int compat_put_timeval_convert(struct compat_timeval __user *o,
+ struct timeval *i)
{
return (put_user(i->tv_sec, &o->tv_sec) ||
put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
@@ -117,7 +116,7 @@ asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
if (tv) {
struct timeval ktv;
do_gettimeofday(&ktv);
- if (compat_put_timeval(tv, &ktv))
+ if (compat_put_timeval_convert(tv, &ktv))
return -EFAULT;
}
if (tz) {
@@ -135,7 +134,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
struct timezone ktz;
if (tv) {
- if (compat_get_timeval(&kts, tv))
+ if (compat_get_timeval_convert(&kts, tv))
return -EFAULT;
}
if (tz) {
@@ -146,12 +145,29 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
+int get_compat_timeval(struct timeval *tv, const struct compat_timeval __user *ctv)
+{
+ return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
+ __get_user(tv->tv_sec, &ctv->tv_sec) ||
+ __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL_GPL(get_compat_timeval);
+
+int put_compat_timeval(const struct timeval *tv, struct compat_timeval __user *ctv)
+{
+ return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) ||
+ __put_user(tv->tv_sec, &ctv->tv_sec) ||
+ __put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL_GPL(put_compat_timeval);
+
int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
__get_user(ts->tv_sec, &cts->tv_sec) ||
__get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
+EXPORT_SYMBOL_GPL(get_compat_timespec);
int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
{
@@ -161,6 +177,42 @@ int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user
}
EXPORT_SYMBOL_GPL(put_compat_timespec);
+int compat_get_timeval(struct timeval *tv, const void __user *utv)
+{
+ if (COMPAT_USE_64BIT_TIME)
+ return copy_from_user(tv, utv, sizeof *tv) ? -EFAULT : 0;
+ else
+ return get_compat_timeval(tv, utv);
+}
+EXPORT_SYMBOL_GPL(compat_get_timeval);
+
+int compat_put_timeval(const struct timeval *tv, void __user *utv)
+{
+ if (COMPAT_USE_64BIT_TIME)
+ return copy_to_user(utv, tv, sizeof *tv) ? -EFAULT : 0;
+ else
+ return put_compat_timeval(tv, utv);
+}
+EXPORT_SYMBOL_GPL(compat_put_timeval);
+
+int compat_get_timespec(struct timespec *ts, const void __user *uts)
+{
+ if (COMPAT_USE_64BIT_TIME)
+ return copy_from_user(ts, uts, sizeof *ts) ? -EFAULT : 0;
+ else
+ return get_compat_timespec(ts, uts);
+}
+EXPORT_SYMBOL_GPL(compat_get_timespec);
+
+int compat_put_timespec(const struct timespec *ts, void __user *uts)
+{
+ if (COMPAT_USE_64BIT_TIME)
+ return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0;
+ else
+ return put_compat_timespec(ts, uts);
+}
+EXPORT_SYMBOL_GPL(compat_put_timespec);
+
static long compat_nanosleep_restart(struct restart_block *restart)
{
struct compat_timespec __user *rmtp;
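
A hedged sketch of how a compat handler can use the new exported helpers without caring whether the ABI carries 32-bit or 64-bit time (the decision is taken inside the helper via COMPAT_USE_64BIT_TIME; the handler name is invented):

	#include <linux/compat.h>
	#include <linux/time.h>

	static long example_compat_gettimeofday(void __user *utv)
	{
		struct timeval ktv;

		do_gettimeofday(&ktv);
		/* Copies out 64-bit time if COMPAT_USE_64BIT_TIME, 32-bit otherwise. */
		return compat_put_timeval(&ktv, utv);
	}
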
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1010cc61931f..b96ad75b7e64 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2162,10 +2162,9 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
mutex_unlock(&callback_mutex);
}
-int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
const struct cpuset *cs;
- int cpu;
rcu_read_lock();
cs = task_cs(tsk);
@@ -2186,22 +2185,10 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
* changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
* set any mask even if it is not right from task_cs() pov,
* the pending set_cpus_allowed_ptr() will fix things.
+ *
+ * select_fallback_rq() will fix things up and set cpu_possible_mask
+ * if required.
*/
-
- cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
- if (cpu >= nr_cpu_ids) {
- /*
- * Either tsk->cpus_allowed is wrong (see above) or it
- * is actually empty. The latter case is only possible
- * if we are racing with remove_tasks_in_empty_cpuset().
- * Like above we can temporary set any mask and rely on
- * set_cpus_allowed_ptr() as synchronization point.
- */
- do_set_cpus_allowed(tsk, cpu_possible_mask);
- cpu = cpumask_any(cpu_active_mask);
- }
-
- return cpu;
}
void cpuset_init_current_mems_allowed(void)
diff --git a/kernel/exit.c b/kernel/exit.c
index 3db1909faed9..d8bd3b425fa7 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -474,7 +474,7 @@ static void close_files(struct files_struct * files)
i = j * __NFDBITS;
if (i >= fdt->max_fds)
break;
- set = fdt->open_fds->fds_bits[j++];
+ set = fdt->open_fds[j++];
while (set) {
if (set & 1) {
struct file * file = xchg(&fdt->fd[i], NULL);
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 5a38bf4de641..cf1a4a68ce44 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -13,7 +13,7 @@ config GENERIC_HARDIRQS
# Options selectable by the architecture code
# Make sparse irq Kconfig switch below available
-config HAVE_SPARSE_IRQ
+config MAY_HAVE_SPARSE_IRQ
bool
# Enable the generic irq autoprobe mechanism
@@ -56,13 +56,22 @@ config GENERIC_IRQ_CHIP
config IRQ_DOMAIN
bool
+config IRQ_DOMAIN_DEBUG
+ bool "Expose hardware/virtual IRQ mapping via debugfs"
+ depends on IRQ_DOMAIN && DEBUG_FS
+ help
+ This option will show the mapping relationship between hardware irq
+ numbers and Linux irq numbers. The mapping is exposed via debugfs
+ in the file "irq_domain_mapping".
+
+ If you don't know what this means you don't need it.
+
# Support forced irq threading
config IRQ_FORCED_THREADING
bool
config SPARSE_IRQ
- bool "Support sparse irq numbering"
- depends on HAVE_SPARSE_IRQ
+ bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
---help---
Sparse irq numbering is useful for distro kernels that want
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index af48e59bc2ff..3601f3fbf67c 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -632,7 +632,7 @@ unsigned int irq_linear_revmap(struct irq_domain *domain,
return revmap[hwirq];
}
-#ifdef CONFIG_VIRQ_DEBUG
+#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
unsigned long flags;
@@ -668,7 +668,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
data = irq_desc_get_chip_data(desc);
seq_printf(m, "0x%16p ", data);
- if (desc->irq_data.domain->of_node)
+ if (desc->irq_data.domain && desc->irq_data.domain->of_node)
p = desc->irq_data.domain->of_node->full_name;
else
p = none;
@@ -695,14 +695,14 @@ static const struct file_operations virq_debug_fops = {
static int __init irq_debugfs_init(void)
{
- if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
+ if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
NULL, &virq_debug_fops) == NULL)
return -ENOMEM;
return 0;
}
__initcall(irq_debugfs_init);
-#endif /* CONFIG_VIRQ_DEBUG */
+#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 157fb9b2b186..e3ed0ecee7c7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1265,29 +1265,59 @@ EXPORT_SYMBOL_GPL(kick_process);
*/
static int select_fallback_rq(int cpu, struct task_struct *p)
{
- int dest_cpu;
const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+ enum { cpuset, possible, fail } state = cpuset;
+ int dest_cpu;
/* Look for allowed, online CPU in same node. */
- for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+ for_each_cpu_mask(dest_cpu, *nodemask) {
+ if (!cpu_online(dest_cpu))
+ continue;
+ if (!cpu_active(dest_cpu))
+ continue;
if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
return dest_cpu;
+ }
- /* Any allowed, online CPU? */
- dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
- if (dest_cpu < nr_cpu_ids)
- return dest_cpu;
+ for (;;) {
+ /* Any allowed, online CPU? */
+ for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+ if (!cpu_online(dest_cpu))
+ continue;
+ if (!cpu_active(dest_cpu))
+ continue;
+ goto out;
+ }
- /* No more Mr. Nice Guy. */
- dest_cpu = cpuset_cpus_allowed_fallback(p);
- /*
- * Don't tell them about moving exiting tasks or
- * kernel threads (both mm NULL), since they never
- * leave kernel.
- */
- if (p->mm && printk_ratelimit()) {
- printk_sched("process %d (%s) no longer affine to cpu%d\n",
- task_pid_nr(p), p->comm, cpu);
+ switch (state) {
+ case cpuset:
+ /* No more Mr. Nice Guy. */
+ cpuset_cpus_allowed_fallback(p);
+ state = possible;
+ break;
+
+ case possible:
+ do_set_cpus_allowed(p, cpu_possible_mask);
+ state = fail;
+ break;
+
+ case fail:
+ BUG();
+ break;
+ }
+ }
+
+out:
+ if (state != cpuset) {
+ /*
+ * Don't tell them about moving exiting tasks or
+ * kernel threads (both mm NULL), since they never
+ * leave kernel.
+ */
+ if (p->mm && printk_ratelimit()) {
+ printk_sched("process %d (%s) no longer affine to cpu%d\n",
+ task_pid_nr(p), p->comm, cpu);
+ }
}
return dest_cpu;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 94340c7544a9..0d97ebdc58f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -416,8 +416,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
#endif /* CONFIG_FAIR_GROUP_SCHED */
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
- unsigned long delta_exec);
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
/**************************************************************
* Scheduling class tree data structure manipulation methods:
@@ -1162,7 +1162,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
__clear_buddies_skip(se);
}
-static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -1546,8 +1546,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
resched_task(rq_of(cfs_rq)->curr);
}
-static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
- unsigned long delta_exec)
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
{
if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
return;
@@ -2073,11 +2073,11 @@ void unthrottle_offline_cfs_rqs(struct rq *rq)
}
#else /* CONFIG_CFS_BANDWIDTH */
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
- unsigned long delta_exec) {}
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {}
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
-static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index b60dad720173..44af55e6d5d0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1428,7 +1428,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
next_idx:
if (idx >= MAX_RT_PRIO)
continue;
- if (next && next->prio < idx)
+ if (next && next->prio <= idx)
continue;
list_for_each_entry(rt_se, array->queue + idx, run_list) {
struct task_struct *p;
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 84c7d96918bf..5cdd8065a3ce 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -163,7 +163,7 @@ void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif
-#ifndef CONFIG_INLINE_SPIN_UNLOCK
+#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
__raw_spin_unlock(lock);
diff --git a/kernel/time.c b/kernel/time.c
index 73e416db0a1e..ba744cf80696 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -163,7 +163,6 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
return error;
if (tz) {
- /* SMP safe, global irq locking makes it work. */
sys_tz = *tz;
update_vsyscall_tz();
if (firsttime) {
@@ -173,12 +172,7 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
}
}
if (tv)
- {
- /* SMP safe, again the code in arch/foo/time.c should
- * globally block out interrupts when it runs.
- */
return do_settimeofday(tv);
- }
return 0;
}
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 8a46f5d64504..8a538c55fc7b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -96,6 +96,11 @@ static int alarmtimer_rtc_add_device(struct device *dev,
return 0;
}
+static inline void alarmtimer_rtc_timer_init(void)
+{
+ rtc_timer_init(&rtctimer, NULL, NULL);
+}
+
static struct class_interface alarmtimer_rtc_interface = {
.add_dev = &alarmtimer_rtc_add_device,
};
@@ -117,6 +122,7 @@ static inline struct rtc_device *alarmtimer_get_rtcdev(void)
#define rtcdev (NULL)
static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
static inline void alarmtimer_rtc_interface_remove(void) { }
+static inline void alarmtimer_rtc_timer_init(void) { }
#endif
/**
@@ -783,6 +789,8 @@ static int __init alarmtimer_init(void)
.nsleep = alarm_timer_nsleep,
};
+ alarmtimer_rtc_timer_init();
+
posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index a45ca167ab24..c9583382141a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -500,7 +500,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
{
u64 ret;
/*
- * We won't try to correct for more then 11% adjustments (110,000 ppm),
+ * We won't try to correct for more than 11% adjustments (110,000 ppm),
*/
ret = (u64)cs->mult * 11;
do_div(ret,100);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 6e039b144daf..f03fd83b170b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -34,8 +34,6 @@ unsigned long tick_nsec;
static u64 tick_length;
static u64 tick_length_base;
-static struct hrtimer leap_timer;
-
#define MAX_TICKADJ 500LL /* usecs */
#define MAX_TICKADJ_SCALED \
(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
@@ -381,70 +379,63 @@ u64 ntp_tick_length(void)
/*
- * Leap second processing. If in leap-insert state at the end of the
- * day, the system clock is set back one second; if in leap-delete
- * state, the system clock is set ahead one second.
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ *
+ * Also handles leap second processing, and returns leap offset
*/
-static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
+int second_overflow(unsigned long secs)
{
- enum hrtimer_restart res = HRTIMER_NORESTART;
- unsigned long flags;
+ s64 delta;
int leap = 0;
+ unsigned long flags;
spin_lock_irqsave(&ntp_lock, flags);
+
+ /*
+ * Leap second processing. If in leap-insert state at the end of the
+ * day, the system clock is set back one second; if in leap-delete
+ * state, the system clock is set ahead one second.
+ */
switch (time_state) {
case TIME_OK:
+ if (time_status & STA_INS)
+ time_state = TIME_INS;
+ else if (time_status & STA_DEL)
+ time_state = TIME_DEL;
break;
case TIME_INS:
- leap = -1;
- time_state = TIME_OOP;
- printk(KERN_NOTICE
- "Clock: inserting leap second 23:59:60 UTC\n");
- hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
- res = HRTIMER_RESTART;
+ if (secs % 86400 == 0) {
+ leap = -1;
+ time_state = TIME_OOP;
+ printk(KERN_NOTICE
+ "Clock: inserting leap second 23:59:60 UTC\n");
+ }
break;
case TIME_DEL:
- leap = 1;
- time_tai--;
- time_state = TIME_WAIT;
- printk(KERN_NOTICE
- "Clock: deleting leap second 23:59:59 UTC\n");
+ if ((secs + 1) % 86400 == 0) {
+ leap = 1;
+ time_tai--;
+ time_state = TIME_WAIT;
+ printk(KERN_NOTICE
+ "Clock: deleting leap second 23:59:59 UTC\n");
+ }
break;
case TIME_OOP:
time_tai++;
time_state = TIME_WAIT;
- /* fall through */
+ break;
+
case TIME_WAIT:
if (!(time_status & (STA_INS | STA_DEL)))
time_state = TIME_OK;
break;
}
- spin_unlock_irqrestore(&ntp_lock, flags);
- /*
- * We have to call this outside of the ntp_lock to keep
- * the proper locking hierarchy
- */
- if (leap)
- timekeeping_leap_insert(leap);
-
- return res;
-}
-
-/*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
- */
-void second_overflow(void)
-{
- s64 delta;
- unsigned long flags;
-
- spin_lock_irqsave(&ntp_lock, flags);
/* Bump the maxerror field */
time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -481,15 +472,17 @@ void second_overflow(void)
tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
<< NTP_SCALE_SHIFT;
time_adjust = 0;
+
+
+
out:
spin_unlock_irqrestore(&ntp_lock, flags);
+
+ return leap;
}
#ifdef CONFIG_GENERIC_CMOS_UPDATE
-/* Disable the cmos update - used by virtualization and embedded */
-int no_sync_cmos_clock __read_mostly;
-
static void sync_cmos_clock(struct work_struct *work);
static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
@@ -536,35 +529,13 @@ static void sync_cmos_clock(struct work_struct *work)
static void notify_cmos_timer(void)
{
- if (!no_sync_cmos_clock)
- schedule_delayed_work(&sync_cmos_work, 0);
+ schedule_delayed_work(&sync_cmos_work, 0);
}
#else
static inline void notify_cmos_timer(void) { }
#endif
-/*
- * Start the leap seconds timer:
- */
-static inline void ntp_start_leap_timer(struct timespec *ts)
-{
- long now = ts->tv_sec;
-
- if (time_status & STA_INS) {
- time_state = TIME_INS;
- now += 86400 - now % 86400;
- hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
-
- return;
- }
-
- if (time_status & STA_DEL) {
- time_state = TIME_DEL;
- now += 86400 - (now + 1) % 86400;
- hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
- }
-}
/*
* Propagate a new txc->status value into the NTP state:
@@ -589,22 +560,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
time_status &= STA_RONLY;
time_status |= txc->status & ~STA_RONLY;
- switch (time_state) {
- case TIME_OK:
- ntp_start_leap_timer(ts);
- break;
- case TIME_INS:
- case TIME_DEL:
- time_state = TIME_OK;
- ntp_start_leap_timer(ts);
- case TIME_WAIT:
- if (!(time_status & (STA_INS | STA_DEL)))
- time_state = TIME_OK;
- break;
- case TIME_OOP:
- hrtimer_restart(&leap_timer);
- break;
- }
}
/*
* Called with the xtime lock held, so we can access and modify
@@ -686,9 +641,6 @@ int do_adjtimex(struct timex *txc)
(txc->tick < 900000/USER_HZ ||
txc->tick > 1100000/USER_HZ))
return -EINVAL;
-
- if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
- hrtimer_cancel(&leap_timer);
}
if (txc->modes & ADJ_SETOFFSET) {
@@ -1010,6 +962,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
void __init ntp_init(void)
{
ntp_clear();
- hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
- leap_timer.function = ntp_leap_second;
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 15be32e19c6e..d66b21308f7c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -184,18 +184,6 @@ static void timekeeping_update(bool clearntp)
}
-void timekeeping_leap_insert(int leapsecond)
-{
- unsigned long flags;
-
- write_seqlock_irqsave(&timekeeper.lock, flags);
- timekeeper.xtime.tv_sec += leapsecond;
- timekeeper.wall_to_monotonic.tv_sec -= leapsecond;
- timekeeping_update(false);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
-
-}
-
/**
* timekeeping_forward_now - update clock to the current time
*
@@ -448,9 +436,12 @@ EXPORT_SYMBOL(timekeeping_inject_offset);
static int change_clocksource(void *data)
{
struct clocksource *new, *old;
+ unsigned long flags;
new = (struct clocksource *) data;
+ write_seqlock_irqsave(&timekeeper.lock, flags);
+
timekeeping_forward_now();
if (!new->enable || new->enable(new) == 0) {
old = timekeeper.clock;
@@ -458,6 +449,10 @@ static int change_clocksource(void *data)
if (old->disable)
old->disable(old);
}
+ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
+
return 0;
}
@@ -827,7 +822,7 @@ static void timekeeping_adjust(s64 offset)
int adj;
/*
- * The point of this is to check if the error is greater then half
+ * The point of this is to check if the error is greater than half
* an interval.
*
* First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
@@ -835,7 +830,7 @@ static void timekeeping_adjust(s64 offset)
* Note we subtract one in the shift, so that error is really error*2.
* This "saves" dividing(shifting) interval twice, but keeps the
* (error > interval) comparison as still measuring if error is
- * larger then half an interval.
+ * larger than half an interval.
*
* Note: It does not "save" on aggravation when reading the code.
*/
@@ -843,7 +838,7 @@ static void timekeeping_adjust(s64 offset)
if (error > interval) {
/*
* We now divide error by 4(via shift), which checks if
- * the error is greater then twice the interval.
+ * the error is greater than twice the interval.
* If it is greater, we need a bigadjust, if its smaller,
* we can adjust by 1.
*/
@@ -874,13 +869,15 @@ static void timekeeping_adjust(s64 offset)
} else /* No adjustment needed */
return;
- WARN_ONCE(timekeeper.clock->maxadj &&
- (timekeeper.mult + adj > timekeeper.clock->mult +
- timekeeper.clock->maxadj),
- "Adjusting %s more then 11%% (%ld vs %ld)\n",
+ if (unlikely(timekeeper.clock->maxadj &&
+ (timekeeper.mult + adj >
+ timekeeper.clock->mult + timekeeper.clock->maxadj))) {
+ printk_once(KERN_WARNING
+ "Adjusting %s more than 11%% (%ld vs %ld)\n",
timekeeper.clock->name, (long)timekeeper.mult + adj,
(long)timekeeper.clock->mult +
timekeeper.clock->maxadj);
+ }
/*
* So the following can be confusing.
*
@@ -952,7 +949,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
u64 raw_nsecs;
- /* If the offset is smaller then a shifted interval, do nothing */
+ /* If the offset is smaller than a shifted interval, do nothing */
if (offset < timekeeper.cycle_interval<<shift)
return offset;
@@ -962,9 +959,11 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
while (timekeeper.xtime_nsec >= nsecps) {
+ int leap;
timekeeper.xtime_nsec -= nsecps;
timekeeper.xtime.tv_sec++;
- second_overflow();
+ leap = second_overflow(timekeeper.xtime.tv_sec);
+ timekeeper.xtime.tv_sec += leap;
}
/* Accumulate raw time */
@@ -1018,13 +1017,13 @@ static void update_wall_time(void)
* With NO_HZ we may have to accumulate many cycle_intervals
* (think "ticks") worth of time at once. To do this efficiently,
* we calculate the largest doubling multiple of cycle_intervals
- * that is smaller then the offset. We then accumulate that
+ * that is smaller than the offset. We then accumulate that
* chunk in one go, and then try to consume the next smaller
* doubled multiple.
*/
shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
shift = max(0, shift);
- /* Bound shift to one less then what overflows tick_length */
+ /* Bound shift to one less than what overflows tick_length */
maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
shift = min(shift, maxshift);
while (offset >= timekeeper.cycle_interval) {
@@ -1072,12 +1071,14 @@ static void update_wall_time(void)
/*
* Finally, make sure that after the rounding
- * xtime.tv_nsec isn't larger then NSEC_PER_SEC
+ * xtime.tv_nsec isn't larger than NSEC_PER_SEC
*/
if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) {
+ int leap;
timekeeper.xtime.tv_nsec -= NSEC_PER_SEC;
timekeeper.xtime.tv_sec++;
- second_overflow();
+ leap = second_overflow(timekeeper.xtime.tv_sec);
+ timekeeper.xtime.tv_sec += leap;
}
timekeeping_update(false);
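
Commentary, not part of the patch: the comments kept in the timekeeping_adjust() hunks above rely on the fact that carrying error*2 and comparing it against the full interval is the same test as comparing the error against half an interval. A tiny numeric check of that equivalence, with arbitrary values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t interval = 1000000;   /* arbitrary interval, in ns */

        for (int64_t error = 499999; error <= 500001; error++) {
            int direct  = (error > interval / 2);      /* "error > half interval" */
            int shifted = ((error << 1) > interval);   /* "error*2 > interval"    */
            printf("error=%lld direct=%d shifted=%d\n",
                   (long long)error, direct, shifted); /* both columns agree */
        }
        return 0;
    }
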
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f7af95d304c5..6777153f18f3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -499,6 +499,7 @@ config RT_MUTEX_TESTER
config DEBUG_SPINLOCK
bool "Spinlock and rw-lock debugging: basic checks"
depends on DEBUG_KERNEL
+ select UNINLINE_SPIN_UNLOCK
help
Say Y here and build SMP to catch missing spinlock initialization
and certain other kinds of spinlock errors commonly made. This is
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 49142612916e..5914623f426a 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -733,7 +733,8 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
data = &tv;
len = sizeof(tv);
#ifdef CONFIG_COMPAT
- if (msg->msg_flags & MSG_CMSG_COMPAT) {
+ if (!COMPAT_USE_64BIT_TIME &&
+ (msg->msg_flags & MSG_CMSG_COMPAT)) {
ctv.tv_sec = tv.tv_sec;
ctv.tv_usec = tv.tv_usec;
data = &ctv;
diff --git a/net/compat.c b/net/compat.c
index 64b4515a64e6..e055708b8ec9 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -219,8 +219,6 @@ Efault:
int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
{
- struct compat_timeval ctv;
- struct compat_timespec cts[3];
struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
struct compat_cmsghdr cmhdr;
int cmlen;
@@ -230,24 +228,28 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
return 0; /* XXX: return error? check spec. */
}
- if (level == SOL_SOCKET && type == SCM_TIMESTAMP) {
- struct timeval *tv = (struct timeval *)data;
- ctv.tv_sec = tv->tv_sec;
- ctv.tv_usec = tv->tv_usec;
- data = &ctv;
- len = sizeof(ctv);
- }
- if (level == SOL_SOCKET &&
- (type == SCM_TIMESTAMPNS || type == SCM_TIMESTAMPING)) {
- int count = type == SCM_TIMESTAMPNS ? 1 : 3;
- int i;
- struct timespec *ts = (struct timespec *)data;
- for (i = 0; i < count; i++) {
- cts[i].tv_sec = ts[i].tv_sec;
- cts[i].tv_nsec = ts[i].tv_nsec;
+ if (!COMPAT_USE_64BIT_TIME) {
+ struct compat_timeval ctv;
+ struct compat_timespec cts[3];
+ if (level == SOL_SOCKET && type == SCM_TIMESTAMP) {
+ struct timeval *tv = (struct timeval *)data;
+ ctv.tv_sec = tv->tv_sec;
+ ctv.tv_usec = tv->tv_usec;
+ data = &ctv;
+ len = sizeof(ctv);
+ }
+ if (level == SOL_SOCKET &&
+ (type == SCM_TIMESTAMPNS || type == SCM_TIMESTAMPING)) {
+ int count = type == SCM_TIMESTAMPNS ? 1 : 3;
+ int i;
+ struct timespec *ts = (struct timespec *)data;
+ for (i = 0; i < count; i++) {
+ cts[i].tv_sec = ts[i].tv_sec;
+ cts[i].tv_nsec = ts[i].tv_nsec;
+ }
+ data = &cts;
+ len = sizeof(cts[0]) * count;
}
- data = &cts;
- len = sizeof(cts[0]) * count;
}
cmlen = CMSG_COMPAT_LEN(len);
@@ -454,11 +456,15 @@ static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
- struct compat_timeval __user *ctv =
- (struct compat_timeval __user *) userstamp;
- int err = -ENOENT;
+ struct compat_timeval __user *ctv;
+ int err;
struct timeval tv;
+ if (COMPAT_USE_64BIT_TIME)
+ return sock_get_timestamp(sk, userstamp);
+
+ ctv = (struct compat_timeval __user *) userstamp;
+ err = -ENOENT;
if (!sock_flag(sk, SOCK_TIMESTAMP))
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
tv = ktime_to_timeval(sk->sk_stamp);
@@ -478,11 +484,15 @@ EXPORT_SYMBOL(compat_sock_get_timestamp);
int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
- struct compat_timespec __user *ctv =
- (struct compat_timespec __user *) userstamp;
- int err = -ENOENT;
+ struct compat_timespec __user *ctv;
+ int err;
struct timespec ts;
+ if (COMPAT_USE_64BIT_TIME)
+ return sock_get_timestampns (sk, userstamp);
+
+ ctv = (struct compat_timespec __user *) userstamp;
+ err = -ENOENT;
if (!sock_flag(sk, SOCK_TIMESTAMP))
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
ts = ktime_to_timespec(sk->sk_stamp);
@@ -767,6 +777,11 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
int datagrams;
struct timespec ktspec;
+ if (COMPAT_USE_64BIT_TIME)
+ return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT,
+ (struct timespec *) timeout);
+
if (timeout == NULL)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, NULL);
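
Commentary, not part of the patch: the hci_sock.c and net/compat.c hunks above all follow the same pattern, skipping the 32-bit compat timeval/timespec conversion when COMPAT_USE_64BIT_TIME says the compat ABI already carries 64-bit time fields (e.g. x32). A minimal userspace sketch of that guard; compat_use_64bit_time, struct native_timeval and put_timestamp are made-up names for illustration only.

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Illustrative 32-bit compat layout vs. native 64-bit layout. */
    struct compat_timeval { int32_t tv_sec; int32_t tv_usec; };
    struct native_timeval { int64_t tv_sec; int64_t tv_usec; };

    /* Stand-in for COMPAT_USE_64BIT_TIME: true when the compat ABI already
     * uses 64-bit time fields, so no narrowing copy is needed. */
    static bool compat_use_64bit_time = false;

    /* Copy a timestamp out in whichever layout the caller expects. */
    static size_t put_timestamp(void *dst, const struct native_timeval *tv)
    {
        if (compat_use_64bit_time) {
            memcpy(dst, tv, sizeof(*tv));     /* native layout, no conversion */
            return sizeof(*tv);
        }
        struct compat_timeval ctv = {
            .tv_sec  = (int32_t)tv->tv_sec,   /* narrowed for the 32-bit ABI */
            .tv_usec = (int32_t)tv->tv_usec,
        };
        memcpy(dst, &ctv, sizeof(ctv));
        return sizeof(ctv);
    }

    int main(void)
    {
        struct native_timeval tv = { .tv_sec = 1331000000, .tv_usec = 42 };
        unsigned char buf[sizeof(tv)];
        printf("copied %zu bytes\n", put_timestamp(buf, &tv));
        return 0;
    }
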
diff --git a/net/socket.c b/net/socket.c
index 12a48d846223..484cc6953fc6 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2592,7 +2592,7 @@ void socket_seq_show(struct seq_file *seq)
#ifdef CONFIG_COMPAT
static int do_siocgstamp(struct net *net, struct socket *sock,
- unsigned int cmd, struct compat_timeval __user *up)
+ unsigned int cmd, void __user *up)
{
mm_segment_t old_fs = get_fs();
struct timeval ktv;
@@ -2601,15 +2601,14 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
set_fs(KERNEL_DS);
err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
set_fs(old_fs);
- if (!err) {
- err = put_user(ktv.tv_sec, &up->tv_sec);
- err |= __put_user(ktv.tv_usec, &up->tv_usec);
- }
+ if (!err)
+ err = compat_put_timeval(up, &ktv);
+
return err;
}
static int do_siocgstampns(struct net *net, struct socket *sock,
- unsigned int cmd, struct compat_timespec __user *up)
+ unsigned int cmd, void __user *up)
{
mm_segment_t old_fs = get_fs();
struct timespec kts;
@@ -2618,10 +2617,9 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
set_fs(KERNEL_DS);
err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
set_fs(old_fs);
- if (!err) {
- err = put_user(kts.tv_sec, &up->tv_sec);
- err |= __put_user(kts.tv_nsec, &up->tv_nsec);
- }
+ if (!err)
+ err = compat_put_timespec(up, &kts);
+
return err;
}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index f21ece088764..de0b0f39d9d8 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -830,6 +830,8 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
{
ssize_t ret;
+ if (count == 0)
+ return -EINVAL;
if (copy_from_user(kaddr, buf, count))
return -EFAULT;
kaddr[count] = '\0';
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index c84c0e0c41cb..0af37fc46818 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1014,6 +1014,7 @@ enum {
RPCAUTH_statd,
RPCAUTH_nfsd4_cb,
RPCAUTH_cache,
+ RPCAUTH_nfsd,
RPCAUTH_RootEOF
};
@@ -1046,6 +1047,10 @@ static const struct rpc_filelist files[] = {
.name = "cache",
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
},
+ [RPCAUTH_nfsd] = {
+ .name = "nfsd",
+ .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ },
};
/*
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index bcd574f2ac56..521d8f7dc833 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -507,7 +507,7 @@ static int unix_gid_parse(struct cache_detail *cd,
time_t expiry;
struct unix_gid ug, *ugp;
- if (mlen <= 0 || mesg[mlen-1] != '\n')
+ if (mesg[mlen - 1] != '\n')
return -EINVAL;
mesg[mlen-1] = 0;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 40ae884db865..824d32fb3121 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1381,8 +1381,6 @@ void svc_sock_update_bufs(struct svc_serv *serv)
spin_lock_bh(&serv->sv_lock);
list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list)
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
- list_for_each_entry(svsk, &serv->sv_tempsocks, sk_xprt.xpt_list)
- set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
spin_unlock_bh(&serv->sv_lock);
}
EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index 09af4fab1a45..8343737e85f4 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -47,6 +47,7 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svc_rdma.h>
+#include "xprt_rdma.h"
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index 9530ef2d40dc..8d2edddf48cf 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -60,21 +60,11 @@ static u32 *decode_read_list(u32 *va, u32 *vaend)
struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va;
while (ch->rc_discrim != xdr_zero) {
- u64 ch_offset;
-
if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) >
(unsigned long)vaend) {
dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch);
return NULL;
}
-
- ch->rc_discrim = ntohl(ch->rc_discrim);
- ch->rc_position = ntohl(ch->rc_position);
- ch->rc_target.rs_handle = ntohl(ch->rc_target.rs_handle);
- ch->rc_target.rs_length = ntohl(ch->rc_target.rs_length);
- va = (u32 *)&ch->rc_target.rs_offset;
- xdr_decode_hyper(va, &ch_offset);
- put_unaligned(ch_offset, (u64 *)va);
ch++;
}
return (u32 *)&ch->rc_position;
@@ -91,7 +81,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
*byte_count = 0;
*ch_count = 0;
for (; ch->rc_discrim != 0; ch++) {
- *byte_count = *byte_count + ch->rc_target.rs_length;
+ *byte_count = *byte_count + ntohl(ch->rc_target.rs_length);
*ch_count = *ch_count + 1;
}
}
@@ -108,7 +98,8 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
*/
static u32 *decode_write_list(u32 *va, u32 *vaend)
{
- int ch_no;
+ int nchunks;
+
struct rpcrdma_write_array *ary =
(struct rpcrdma_write_array *)va;
@@ -121,37 +112,24 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
return NULL;
}
- ary->wc_discrim = ntohl(ary->wc_discrim);
- ary->wc_nchunks = ntohl(ary->wc_nchunks);
+ nchunks = ntohl(ary->wc_nchunks);
if (((unsigned long)&ary->wc_array[0] +
- (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) >
+ (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
(unsigned long)vaend) {
dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
- ary, ary->wc_nchunks, vaend);
+ ary, nchunks, vaend);
return NULL;
}
- for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
- u64 ch_offset;
-
- ary->wc_array[ch_no].wc_target.rs_handle =
- ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
- ary->wc_array[ch_no].wc_target.rs_length =
- ntohl(ary->wc_array[ch_no].wc_target.rs_length);
- va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
- xdr_decode_hyper(va, &ch_offset);
- put_unaligned(ch_offset, (u64 *)va);
- }
-
/*
* rs_length is the 2nd 4B field in wc_target and taking its
* address skips the list terminator
*/
- return (u32 *)&ary->wc_array[ch_no].wc_target.rs_length;
+ return (u32 *)&ary->wc_array[nchunks].wc_target.rs_length;
}
static u32 *decode_reply_array(u32 *va, u32 *vaend)
{
- int ch_no;
+ int nchunks;
struct rpcrdma_write_array *ary =
(struct rpcrdma_write_array *)va;
@@ -164,28 +142,15 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
return NULL;
}
- ary->wc_discrim = ntohl(ary->wc_discrim);
- ary->wc_nchunks = ntohl(ary->wc_nchunks);
+ nchunks = ntohl(ary->wc_nchunks);
if (((unsigned long)&ary->wc_array[0] +
- (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) >
+ (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
(unsigned long)vaend) {
dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
- ary, ary->wc_nchunks, vaend);
+ ary, nchunks, vaend);
return NULL;
}
- for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
- u64 ch_offset;
-
- ary->wc_array[ch_no].wc_target.rs_handle =
- ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
- ary->wc_array[ch_no].wc_target.rs_length =
- ntohl(ary->wc_array[ch_no].wc_target.rs_length);
- va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
- xdr_decode_hyper(va, &ch_offset);
- put_unaligned(ch_offset, (u64 *)va);
- }
-
- return (u32 *)&ary->wc_array[ch_no];
+ return (u32 *)&ary->wc_array[nchunks];
}
int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
@@ -386,13 +351,14 @@ void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *ary,
void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary,
int chunk_no,
- u32 rs_handle, u64 rs_offset,
+ __be32 rs_handle,
+ __be64 rs_offset,
u32 write_len)
{
struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target;
- seg->rs_handle = htonl(rs_handle);
+ seg->rs_handle = rs_handle;
+ seg->rs_offset = rs_offset;
seg->rs_length = htonl(write_len);
- xdr_encode_hyper((u32 *) &seg->rs_offset, rs_offset);
}
void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt,
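
Commentary, not part of the patch: the svc_rdma_marshal.c hunks above stop byte-swapping the RDMA chunk lists in place; the fields stay in network byte order and each consumer converts at the point of use, ntohl() for 32-bit fields and xdr_decode_hyper() for the 64-bit offset, as the recvfrom and sendto hunks below show. A minimal userspace sketch of convert-on-access; rs_segment, decode_hyper and the wire bytes are illustrative stand-ins.

    #include <arpa/inet.h>   /* ntohl */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Wire-format segment: all fields stay big-endian in the receive buffer. */
    struct rs_segment {
        uint32_t rs_handle;      /* big-endian on the wire */
        uint32_t rs_length;      /* big-endian on the wire */
        uint32_t rs_offset[2];   /* 64-bit offset sent as two 32-bit words */
    };

    /* Convert-on-access: decode the 64-bit offset without writing the host
     * value back into the buffer (the idea behind xdr_decode_hyper above). */
    static uint64_t decode_hyper(const uint32_t be[2])
    {
        return ((uint64_t)ntohl(be[0]) << 32) | ntohl(be[1]);
    }

    int main(void)
    {
        /* handle=0x11, length=0x2000, offset=0x0000000100000008 */
        const unsigned char wire[] = {
            0x00,0x00,0x00,0x11, 0x00,0x00,0x20,0x00,
            0x00,0x00,0x00,0x01, 0x00,0x00,0x00,0x08,
        };
        struct rs_segment seg;
        memcpy(&seg, wire, sizeof(seg));

        printf("handle=0x%x length=%u offset=0x%llx\n",
               ntohl(seg.rs_handle), ntohl(seg.rs_length),
               (unsigned long long)decode_hyper(seg.rs_offset));
        return 0;
    }
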
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index df67211c4baf..41cb63b623df 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -147,7 +147,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
page_off = 0;
ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
ch_no = 0;
- ch_bytes = ch->rc_target.rs_length;
+ ch_bytes = ntohl(ch->rc_target.rs_length);
head->arg.head[0] = rqstp->rq_arg.head[0];
head->arg.tail[0] = rqstp->rq_arg.tail[0];
head->arg.pages = &head->pages[head->count];
@@ -183,7 +183,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
ch_no++;
ch++;
chl_map->ch[ch_no].start = sge_no;
- ch_bytes = ch->rc_target.rs_length;
+ ch_bytes = ntohl(ch->rc_target.rs_length);
/* If bytes remaining account for next chunk */
if (byte_count) {
head->arg.page_len += ch_bytes;
@@ -281,11 +281,12 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
offset = 0;
ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
for (ch_no = 0; ch_no < ch_count; ch_no++) {
+ int len = ntohl(ch->rc_target.rs_length);
rpl_map->sge[ch_no].iov_base = frmr->kva + offset;
- rpl_map->sge[ch_no].iov_len = ch->rc_target.rs_length;
+ rpl_map->sge[ch_no].iov_len = len;
chl_map->ch[ch_no].count = 1;
chl_map->ch[ch_no].start = ch_no;
- offset += ch->rc_target.rs_length;
+ offset += len;
ch++;
}
@@ -316,7 +317,7 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
for (i = 0; i < count; i++) {
ctxt->sge[i].length = 0; /* in case map fails */
if (!frmr) {
- BUG_ON(0 == virt_to_page(vec[i].iov_base));
+ BUG_ON(!virt_to_page(vec[i].iov_base));
off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
ctxt->sge[i].addr =
ib_dma_map_page(xprt->sc_cm_id->device,
@@ -426,6 +427,7 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
ch->rc_discrim != 0; ch++, ch_no++) {
+ u64 rs_offset;
next_sge:
ctxt = svc_rdma_get_context(xprt);
ctxt->direction = DMA_FROM_DEVICE;
@@ -440,10 +442,10 @@ next_sge:
read_wr.opcode = IB_WR_RDMA_READ;
ctxt->wr_op = read_wr.opcode;
read_wr.send_flags = IB_SEND_SIGNALED;
- read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
- read_wr.wr.rdma.remote_addr =
- get_unaligned(&(ch->rc_target.rs_offset)) +
- sgl_offset;
+ read_wr.wr.rdma.rkey = ntohl(ch->rc_target.rs_handle);
+ xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
+ &rs_offset);
+ read_wr.wr.rdma.remote_addr = rs_offset + sgl_offset;
read_wr.sg_list = ctxt->sge;
read_wr.num_sge =
rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 249a835b703f..42eb7ba0b903 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -409,21 +409,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
u64 rs_offset;
arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
- write_len = min(xfer_len, arg_ch->rs_length);
+ write_len = min(xfer_len, ntohl(arg_ch->rs_length));
/* Prepare the response chunk given the length actually
* written */
- rs_offset = get_unaligned(&(arg_ch->rs_offset));
+ xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
- arg_ch->rs_handle,
- rs_offset,
- write_len);
+ arg_ch->rs_handle,
+ arg_ch->rs_offset,
+ write_len);
chunk_off = 0;
while (write_len) {
int this_write;
this_write = min(write_len, max_write);
ret = send_write(xprt, rqstp,
- arg_ch->rs_handle,
+ ntohl(arg_ch->rs_handle),
rs_offset + chunk_off,
xdr_off,
this_write,
@@ -457,6 +457,7 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
u32 xdr_off;
int chunk_no;
int chunk_off;
+ int nchunks;
struct rpcrdma_segment *ch;
struct rpcrdma_write_array *arg_ary;
struct rpcrdma_write_array *res_ary;
@@ -476,26 +477,27 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
max_write = xprt->sc_max_sge * PAGE_SIZE;
/* xdr offset starts at RPC message */
+ nchunks = ntohl(arg_ary->wc_nchunks);
for (xdr_off = 0, chunk_no = 0;
- xfer_len && chunk_no < arg_ary->wc_nchunks;
+ xfer_len && chunk_no < nchunks;
chunk_no++) {
u64 rs_offset;
ch = &arg_ary->wc_array[chunk_no].wc_target;
- write_len = min(xfer_len, ch->rs_length);
+ write_len = min(xfer_len, htonl(ch->rs_length));
/* Prepare the reply chunk given the length actually
* written */
- rs_offset = get_unaligned(&(ch->rs_offset));
+ xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
- ch->rs_handle, rs_offset,
- write_len);
+ ch->rs_handle, ch->rs_offset,
+ write_len);
chunk_off = 0;
while (write_len) {
int this_write;
this_write = min(write_len, max_write);
ret = send_write(xprt, rqstp,
- ch->rs_handle,
+ ntohl(ch->rs_handle),
rs_offset + chunk_off,
xdr_off,
this_write,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 894cb42db91d..73b428bef598 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -51,6 +51,7 @@
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
+#include "xprt_rdma.h"
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
@@ -90,12 +91,6 @@ struct svc_xprt_class svc_rdma_class = {
.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};
-/* WR context cache. Created in svc_rdma.c */
-extern struct kmem_cache *svc_rdma_ctxt_cachep;
-
-/* Workqueue created in svc_rdma.c */
-extern struct workqueue_struct *svc_rdma_wq;
-
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
struct svc_rdma_op_ctxt *ctxt;
@@ -150,9 +145,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
atomic_dec(&xprt->sc_ctxt_used);
}
-/* Temporary NFS request map cache. Created in svc_rdma.c */
-extern struct kmem_cache *svc_rdma_map_cachep;
-
/*
* Temporary NFS req mappings are shared across all transport
* instances. These are short lived and should be bounded by the number
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 08c5d5a128fc..9a66c95b5837 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -343,4 +343,11 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
*/
int rpcrdma_marshal_req(struct rpc_rqst *);
+/* Temporary NFS request map cache. Created in svc_rdma.c */
+extern struct kmem_cache *svc_rdma_map_cachep;
+/* WR context cache. Created in svc_rdma.c */
+extern struct kmem_cache *svc_rdma_ctxt_cachep;
+/* Workqueue created in svc_rdma.c */
+extern struct workqueue_struct *svc_rdma_wq;
+
#endif /* _LINUX_SUNRPC_XPRT_RDMA_H */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 92bc5181dbeb..890b03f8d877 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2475,6 +2475,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
static struct rpc_xprt_ops bc_tcp_ops = {
.reserve_xprt = xprt_reserve_xprt,
.release_xprt = xprt_release_xprt,
+ .rpcbind = xs_local_rpcbind,
.buf_alloc = bc_malloc,
.buf_free = bc_free,
.send_request = bc_send_request,
diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh
index 98cffcb941ea..a2af2e88daf3 100644
--- a/scripts/gcc-goto.sh
+++ b/scripts/gcc-goto.sh
@@ -2,4 +2,20 @@
# Test for gcc 'asm goto' support
# Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
-echo "int main(void) { entry: asm goto (\"\"::::entry); return 0; }" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
+cat << "END" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
+int main(void)
+{
+#ifdef __arm__
+ /*
+ * Not related to asm goto, but used by jump label
+ * and broken on some ARM GCC versions (see GCC Bug 48637).
+ */
+ static struct { int dummy; int state; } tp;
+ asm (".long %c0" :: "i" (&tp.state));
+#endif
+
+entry:
+ asm goto ("" :::: entry);
+ return 0;
+}
+END
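
Commentary, not part of the patch: the rewritten script feeds the probe program above to the compiler and prints "y" only if it is accepted; the added ARM block additionally probes the ".long %c0" static-key pattern that some ARM GCC versions miscompile (GCC bug 48637). For readers unfamiliar with the construct, a standalone toy using 'asm goto'; maybe_branch is a made-up example, and it needs a compiler new enough to pass the script.

    #include <stdio.h>

    static int maybe_branch(void)
    {
        /* An asm statement that is allowed to jump to the C label 'taken'.
         * The empty template never actually jumps, so the fall-through path
         * runs; the point is only that the compiler accepts the construct. */
        asm goto ("" : : : : taken);
        return 0;                      /* fall-through path */
    taken:
        return 1;                      /* reached only if the asm jumped */
    }

    int main(void)
    {
        printf("branch taken: %d\n", maybe_branch());   /* prints 0 */
        return 0;
    }
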
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 304929909375..15c6c567468b 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2147,7 +2147,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
fdt = files_fdtable(files);
if (i >= fdt->max_fds)
break;
- set = fdt->open_fds->fds_bits[j];
+ set = fdt->open_fds[j];
if (!set)
continue;
spin_unlock(&files->file_lock);
diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
index d1aa4218f129..48d7c0aa5073 100644
--- a/sound/arm/pxa2xx-ac97-lib.c
+++ b/sound/arm/pxa2xx-ac97-lib.c
@@ -17,11 +17,12 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <sound/ac97_codec.h>
#include <sound/pxa2xx-lib.h>
-#include <asm/irq.h>
+#include <mach/irqs.h>
#include <mach/regs-ac97.h>
#include <mach/audio.h>
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 3a39626a82d6..afef72c4f0d3 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -11,6 +11,7 @@
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index 4fa1dbd8ee83..f7c2bb08055d 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/types.h>
#include <linux/io.h>
#include <sound/core.h>
@@ -467,15 +468,24 @@ static int __devinit atmel_abdac_probe(struct platform_device *pdev)
snd_card_set_dev(card, &pdev->dev);
if (pdata->dws.dma_dev) {
- struct dw_dma_slave *dws = &pdata->dws;
dma_cap_mask_t mask;
- dws->tx_reg = regs->start + DAC_DATA;
-
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- dac->dma.chan = dma_request_channel(mask, filter, dws);
+ dac->dma.chan = dma_request_channel(mask, filter, &pdata->dws);
+ if (dac->dma.chan) {
+ struct dma_slave_config dma_conf = {
+ .dst_addr = regs->start + DAC_DATA,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst = 1,
+ .dst_maxburst = 1,
+ .direction = DMA_MEM_TO_DEV,
+ .device_fc = false,
+ };
+
+ dmaengine_slave_config(dac->dma.chan, &dma_conf);
+ }
}
if (!pdata->dws.dma_dev || !dac->dma.chan) {
dev_dbg(&pdev->dev, "DMA not available\n");
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 61dade698358..115313ef54d6 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
+#include <linux/types.h>
#include <linux/io.h>
#include <sound/core.h>
@@ -1014,16 +1015,28 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev)
if (cpu_is_at32ap7000()) {
if (pdata->rx_dws.dma_dev) {
- struct dw_dma_slave *dws = &pdata->rx_dws;
dma_cap_mask_t mask;
- dws->rx_reg = regs->start + AC97C_CARHR + 2;
-
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
chip->dma.rx_chan = dma_request_channel(mask, filter,
- dws);
+ &pdata->rx_dws);
+ if (chip->dma.rx_chan) {
+ struct dma_slave_config dma_conf = {
+ .src_addr = regs->start + AC97C_CARHR +
+ 2,
+ .src_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .src_maxburst = 1,
+ .dst_maxburst = 1,
+ .direction = DMA_DEV_TO_MEM,
+ .device_fc = false,
+ };
+
+ dmaengine_slave_config(chip->dma.rx_chan,
+ &dma_conf);
+ }
dev_info(&chip->pdev->dev, "using %s for DMA RX\n",
dev_name(&chip->dma.rx_chan->dev->device));
@@ -1031,16 +1044,28 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev)
}
if (pdata->tx_dws.dma_dev) {
- struct dw_dma_slave *dws = &pdata->tx_dws;
dma_cap_mask_t mask;
- dws->tx_reg = regs->start + AC97C_CATHR + 2;
-
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
chip->dma.tx_chan = dma_request_channel(mask, filter,
- dws);
+ &pdata->tx_dws);
+ if (chip->dma.tx_chan) {
+ struct dma_slave_config dma_conf = {
+ .dst_addr = regs->start + AC97C_CATHR +
+ 2,
+ .dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .src_maxburst = 1,
+ .dst_maxburst = 1,
+ .direction = DMA_MEM_TO_DEV,
+ .device_fc = false,
+ };
+
+ dmaengine_slave_config(chip->dma.tx_chan,
+ &dma_conf);
+ }
dev_info(&chip->pdev->dev, "using %s for DMA TX\n",
dev_name(&chip->dma.tx_chan->dev->device));
diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
index bbe32d2177d9..dbc550716790 100644
--- a/sound/core/seq/seq_dummy.c
+++ b/sound/core/seq/seq_dummy.c
@@ -46,7 +46,7 @@
The number of ports to be created can be specified via the module
parameter "ports". For example, to create four ports, add the
- following option in /etc/modprobe.conf:
+ following option in a configuration file under /etc/modprobe.d/:
option snd-seq-dummy ports=4
diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig
index c8961165277c..fe5ae09ffccb 100644
--- a/sound/drivers/Kconfig
+++ b/sound/drivers/Kconfig
@@ -50,7 +50,8 @@ config SND_PCSP
before the other sound driver of yours, making the
pc-speaker a default sound device. Which is likely not
what you want. To make this driver play nicely with other
- sound driver, you can add this into your /etc/modprobe.conf:
+ sound driver, you can add this in a configuration file under
+ /etc/modprobe.d/ directory:
options snd-pcsp index=2
You don't need this driver if you only want your pc-speaker to beep.
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index babaedd242f7..d7ccf28bd66a 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -65,7 +65,7 @@ static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
//static bool enable = SNDRV_DEFAULT_ENABLE1; /* Enable this card */
#ifdef CONFIG_PNP
-static int isapnp = 1; /* Enable ISA PnP detection */
+static bool isapnp = true; /* Enable ISA PnP detection */
#endif
static long port = SNDRV_DEFAULT_PORT1; /* 0x530,0xe80,0xf40,0x604 */
static long mpu_port = SNDRV_DEFAULT_PORT1; /* 0x300,0x310,0x320,0x330 */
diff --git a/sound/oss/msnd_pinnacle.c b/sound/oss/msnd_pinnacle.c
index eba734560f6f..2c79d60a725f 100644
--- a/sound/oss/msnd_pinnacle.c
+++ b/sound/oss/msnd_pinnacle.c
@@ -1631,7 +1631,7 @@ static int ide_irq __initdata = 0;
static int joystick_io __initdata = 0;
/* If we have the digital daugherboard... */
-static int digital __initdata = 0;
+static bool digital __initdata = false;
#endif
static int fifosize __initdata = DEFFIFOSIZE;
diff --git a/sound/pci/asihpi/hpi_internal.h b/sound/pci/asihpi/hpi_internal.h
index 4cc315daeda0..8c63200cf339 100644
--- a/sound/pci/asihpi/hpi_internal.h
+++ b/sound/pci/asihpi/hpi_internal.h
@@ -42,7 +42,7 @@ On error *pLockedMemHandle marked invalid, non-zero returned.
If this function succeeds, then HpiOs_LockedMem_GetVirtAddr() and
HpiOs_LockedMem_GetPyhsAddr() will always succed on the returned handle.
*/
-u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_locked_mem_handle,
+int hpios_locked_mem_alloc(struct consistent_dma_area *p_locked_mem_handle,
/**< memory handle */
u32 size, /**< Size in bytes to allocate */
struct pci_dev *p_os_reference
diff --git a/sound/pci/asihpi/hpios.c b/sound/pci/asihpi/hpios.c
index 2d7d1c2e1d0d..87f4385fe8c7 100644
--- a/sound/pci/asihpi/hpios.c
+++ b/sound/pci/asihpi/hpios.c
@@ -43,7 +43,7 @@ void hpios_delay_micro_seconds(u32 num_micro_sec)
On error, return -ENOMEM, and *pMemArea.size = 0
*/
-u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size,
+int hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size,
struct pci_dev *pdev)
{
/*?? any benefit in using managed dmam_alloc_coherent? */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8ea2fd654327..9917e55d6f11 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2717,9 +2717,6 @@ static int alc_auto_fill_adc_caps(struct hda_codec *codec)
int max_nums = ARRAY_SIZE(spec->private_adc_nids);
int i, nums = 0;
- if (spec->shared_mic_hp)
- max_nums = 1; /* no multi streams with the shared HP/mic */
-
nid = codec->start_nid;
for (i = 0; i < codec->num_nodes; i++, nid++) {
hda_nid_t src;
@@ -4076,6 +4073,7 @@ static void alc_remove_invalid_adc_nids(struct hda_codec *codec)
if (spec->dyn_adc_switch)
return;
+ again:
nums = 0;
for (n = 0; n < spec->num_adc_nids; n++) {
hda_nid_t cap = spec->private_capsrc_nids[n];
@@ -4096,6 +4094,11 @@ static void alc_remove_invalid_adc_nids(struct hda_codec *codec)
if (!nums) {
/* check whether ADC-switch is possible */
if (!alc_check_dyn_adc_switch(codec)) {
+ if (spec->shared_mic_hp) {
+ spec->shared_mic_hp = 0;
+ spec->private_imux[0].num_items = 1;
+ goto again;
+ }
printk(KERN_WARNING "hda_codec: %s: no valid ADC found;"
" using fallback 0x%x\n",
codec->chip_name, spec->private_adc_nids[0]);
@@ -4113,7 +4116,7 @@ static void alc_remove_invalid_adc_nids(struct hda_codec *codec)
if (spec->auto_mic)
alc_auto_mic_check_imux(codec); /* check auto-mic setups */
- else if (spec->input_mux->num_items == 1)
+ else if (spec->input_mux->num_items == 1 || spec->shared_mic_hp)
spec->num_adc_nids = 1; /* reduce to a single ADC */
}
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index fe7fbaeb7146..7c49642af052 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3629,7 +3629,7 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
case 2:
case 3:
wm8994->hubs.dcs_codes_l = -9;
- wm8994->hubs.dcs_codes_r = -5;
+ wm8994->hubs.dcs_codes_r = -7;
break;
default:
break;
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index e43c8fa2788b..6b818de2fc03 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
+#include <linux/types.h>
#include <sound/core.h>
#include <sound/initval.h>
@@ -58,6 +59,8 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
if (ret)
return ret;
+ slave_config.device_fc = false;
+
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
slave_config.dst_addr = dma_params->dma_addr;
slave_config.dst_maxburst = dma_params->burstsize;
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index 6ca1f46d84a4..e373fbbc97a0 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -28,6 +28,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
+#include <linux/fsl/mxs-dma.h>
#include <sound/core.h>
#include <sound/initval.h>
@@ -36,7 +37,6 @@
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
-#include <mach/dma.h>
#include "mxs-pcm.h"
struct mxs_pcm_dma_data {
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index 12be05b16880..53f4fd8feced 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -24,12 +24,12 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/time.h>
+#include <linux/fsl/mxs-dma.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/saif.h>
-#include <mach/dma.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <mach/mxs.h>
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 4800d5fe568d..06ea2744cc88 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -11,6 +11,7 @@
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index 0193e595d415..5cfcc655e95f 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -130,7 +130,7 @@ static int siu_pcm_wr_set(struct siu_port *port_info,
sg_dma_len(&sg) = size;
sg_dma_address(&sg) = buff;
- desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan,
+ desc = dmaengine_prep_slave_sg(siu_stream->chan,
&sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_err(dev, "Failed to allocate a dma descriptor\n");
@@ -180,7 +180,7 @@ static int siu_pcm_rd_set(struct siu_port *port_info,
sg_dma_len(&sg) = size;
sg_dma_address(&sg) = buff;
- desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan,
+ desc = dmaengine_prep_slave_sg(siu_stream->chan,
&sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_err(dev, "Failed to allocate dma descriptor\n");
diff --git a/sound/soc/soc-dmaengine-pcm.c b/sound/soc/soc-dmaengine-pcm.c
index 4420b7030c83..475695234b3d 100644
--- a/sound/soc/soc-dmaengine-pcm.c
+++ b/sound/soc/soc-dmaengine-pcm.c
@@ -143,7 +143,7 @@ static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
direction = snd_pcm_substream_to_dma_direction(substream);
prtd->pos = 0;
- desc = chan->device->device_prep_dma_cyclic(chan,
+ desc = dmaengine_prep_dma_cyclic(chan,
substream->runtime->dma_addr,
snd_pcm_lib_buffer_bytes(substream),
snd_pcm_lib_period_bytes(substream), direction);
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 21554611557c..b609d2c64c55 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -132,7 +132,7 @@ txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)),
dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1));
sg_dma_address(&sg) = buf_dma_addr;
- desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
+ desc = dmaengine_prep_slave_sg(chan, &sg, 1,
dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index e8a03aceceb1..a93e06cfcc2a 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -19,6 +19,16 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
+OUTPUT=./
+ifeq ("$(origin O)", "command line")
+ OUTPUT := $(O)/
+endif
+
+ifneq ($(OUTPUT),)
+# check that the output directory actually exists
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
+endif
# --- CONFIGURATION BEGIN ---
@@ -87,6 +97,7 @@ AR = $(CROSS)ar
STRIP = $(CROSS)strip
RANLIB = $(CROSS)ranlib
HOSTCC = gcc
+MKDIR = mkdir
# Now we set up the build system
@@ -95,7 +106,7 @@ HOSTCC = gcc
# set up PWD so that older versions of make will work with our build.
PWD = $(shell pwd)
-GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo po/$$HLANG.gmo; done;}
+GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo $(OUTPUT)po/$$HLANG.gmo; done;}
export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS
@@ -122,15 +133,18 @@ UTIL_OBJS = utils/helpers/amd.o utils/helpers/topology.o utils/helpers/msr.o \
utils/cpupower.o utils/cpufreq-info.o utils/cpufreq-set.o \
utils/cpupower-set.o utils/cpupower-info.o utils/cpuidle-info.o
+UTIL_SRC := $(UTIL_OBJS:.o=.c)
+
+UTIL_OBJS := $(addprefix $(OUTPUT),$(UTIL_OBJS))
+
UTIL_HEADERS = utils/helpers/helpers.h utils/idle_monitor/cpupower-monitor.h \
utils/helpers/bitmask.h \
utils/idle_monitor/idle_monitors.h utils/idle_monitor/idle_monitors.def
-UTIL_SRC := $(UTIL_OBJS:.o=.c)
-
LIB_HEADERS = lib/cpufreq.h lib/sysfs.h
LIB_SRC = lib/cpufreq.c lib/sysfs.c
LIB_OBJS = lib/cpufreq.o lib/sysfs.o
+LIB_OBJS := $(addprefix $(OUTPUT),$(LIB_OBJS))
CFLAGS += -pipe
@@ -168,83 +182,90 @@ endif
# the actual make rules
-all: libcpupower cpupower $(COMPILE_NLS) $(COMPILE_BENCH)
+all: libcpupower $(OUTPUT)cpupower $(COMPILE_NLS) $(COMPILE_BENCH)
-lib/%.o: $(LIB_SRC) $(LIB_HEADERS)
+$(OUTPUT)lib/%.o: $(LIB_SRC) $(LIB_HEADERS)
$(ECHO) " CC " $@
$(QUIET) $(CC) $(CFLAGS) -fPIC -o $@ -c lib/$*.c
-libcpupower.so.$(LIB_MAJ): $(LIB_OBJS)
+$(OUTPUT)libcpupower.so.$(LIB_MAJ): $(LIB_OBJS)
$(ECHO) " LD " $@
$(QUIET) $(CC) -shared $(CFLAGS) $(LDFLAGS) -o $@ \
-Wl,-soname,libcpupower.so.$(LIB_MIN) $(LIB_OBJS)
- @ln -sf $@ libcpupower.so
- @ln -sf $@ libcpupower.so.$(LIB_MIN)
+ @ln -sf $(@F) $(OUTPUT)libcpupower.so
+ @ln -sf $(@F) $(OUTPUT)libcpupower.so.$(LIB_MIN)
-libcpupower: libcpupower.so.$(LIB_MAJ)
+libcpupower: $(OUTPUT)libcpupower.so.$(LIB_MAJ)
# Let all .o files depend on its .c file and all headers
# Might be worth to put this into utils/Makefile at some point of time
$(UTIL_OBJS): $(UTIL_HEADERS)
-.c.o:
+$(OUTPUT)%.o: %.c
$(ECHO) " CC " $@
$(QUIET) $(CC) $(CFLAGS) -I./lib -I ./utils -o $@ -c $*.c
-cpupower: $(UTIL_OBJS) libcpupower.so.$(LIB_MAJ)
+$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
$(ECHO) " CC " $@
- $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) -lcpupower -lrt -lpci -L. -o $@ $(UTIL_OBJS)
+ $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@
$(QUIET) $(STRIPCMD) $@
-po/$(PACKAGE).pot: $(UTIL_SRC)
+$(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC)
$(ECHO) " GETTEXT " $@
$(QUIET) xgettext --default-domain=$(PACKAGE) --add-comments \
- --keyword=_ --keyword=N_ $(UTIL_SRC) && \
- test -f $(PACKAGE).po && \
- mv -f $(PACKAGE).po po/$(PACKAGE).pot
+ --keyword=_ --keyword=N_ $(UTIL_SRC) -p $(@D) -o $(@F)
-po/%.gmo: po/%.po
+$(OUTPUT)po/%.gmo: po/%.po
$(ECHO) " MSGFMT " $@
$(QUIET) msgfmt -o $@ po/$*.po
create-gmo: ${GMO_FILES}
-update-po: po/$(PACKAGE).pot
+update-po: $(OUTPUT)po/$(PACKAGE).pot
$(ECHO) " MSGMRG " $@
$(QUIET) @for HLANG in $(LANGUAGES); do \
echo -n "Updating $$HLANG "; \
- if msgmerge po/$$HLANG.po po/$(PACKAGE).pot -o \
- po/$$HLANG.new.po; then \
- mv -f po/$$HLANG.new.po po/$$HLANG.po; \
+ if msgmerge po/$$HLANG.po $< -o \
+ $(OUTPUT)po/$$HLANG.new.po; then \
+ mv -f $(OUTPUT)po/$$HLANG.new.po $(OUTPUT)po/$$HLANG.po; \
else \
echo "msgmerge for $$HLANG failed!"; \
- rm -f po/$$HLANG.new.po; \
+ rm -f $(OUTPUT)po/$$HLANG.new.po; \
fi; \
done;
-compile-bench: libcpupower.so.$(LIB_MAJ)
- @V=$(V) confdir=$(confdir) $(MAKE) -C bench
+compile-bench: $(OUTPUT)libcpupower.so.$(LIB_MAJ)
+ @V=$(V) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT)
+
+# we compile into subdirectories. if the target directory is not the
+# source directory, they might not exists. So we depend the various
+# files onto their directories.
+DIRECTORY_DEPS = $(LIB_OBJS) $(UTIL_OBJS) $(GMO_FILES)
+$(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS)))
+
+# In the second step, we make a rule to actually create these directories
+$(sort $(dir $(DIRECTORY_DEPS))):
+ $(ECHO) " MKDIR " $@
+ $(QUIET) $(MKDIR) -p $@ 2>/dev/null
clean:
- -find . \( -not -type d \) -and \( -name '*~' -o -name '*.[oas]' \) -type f -print \
+ -find $(OUTPUT) \( -not -type d \) -and \( -name '*~' -o -name '*.[oas]' \) -type f -print \
| xargs rm -f
- -rm -f $(UTIL_BINS)
- -rm -f $(IDLE_OBJS)
- -rm -f cpupower
- -rm -f libcpupower.so*
- -rm -rf po/*.gmo po/*.pot
- $(MAKE) -C bench clean
+ -rm -f $(OUTPUT)cpupower
+ -rm -f $(OUTPUT)libcpupower.so*
+ -rm -rf $(OUTPUT)po/*.{gmo,pot}
+ $(MAKE) -C bench O=$(OUTPUT) clean
install-lib:
$(INSTALL) -d $(DESTDIR)${libdir}
- $(CP) libcpupower.so* $(DESTDIR)${libdir}/
+ $(CP) $(OUTPUT)libcpupower.so* $(DESTDIR)${libdir}/
$(INSTALL) -d $(DESTDIR)${includedir}
$(INSTALL_DATA) lib/cpufreq.h $(DESTDIR)${includedir}/cpufreq.h
install-tools:
$(INSTALL) -d $(DESTDIR)${bindir}
- $(INSTALL_PROGRAM) cpupower $(DESTDIR)${bindir}
+ $(INSTALL_PROGRAM) $(OUTPUT)cpupower $(DESTDIR)${bindir}
install-man:
$(INSTALL_DATA) -D man/cpupower.1 $(DESTDIR)${mandir}/man1/cpupower.1
@@ -257,13 +278,13 @@ install-man:
install-gmo:
$(INSTALL) -d $(DESTDIR)${localedir}
for HLANG in $(LANGUAGES); do \
- echo '$(INSTALL_DATA) -D po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo'; \
- $(INSTALL_DATA) -D po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \
+ echo '$(INSTALL_DATA) -D $(OUTPUT)po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo'; \
+ $(INSTALL_DATA) -D $(OUTPUT)po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \
done;
install-bench:
@#DESTDIR must be set from outside to survive
- @sbindir=$(sbindir) bindir=$(bindir) docdir=$(docdir) confdir=$(confdir) $(MAKE) -C bench install
+ @sbindir=$(sbindir) bindir=$(bindir) docdir=$(docdir) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT) install
install: all install-lib install-tools install-man $(INSTALL_NLS) $(INSTALL_BENCH)
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
index 2b67606fc3e3..7ec7021a29cd 100644
--- a/tools/power/cpupower/bench/Makefile
+++ b/tools/power/cpupower/bench/Makefile
@@ -1,29 +1,36 @@
-LIBS = -L../ -lm -lcpupower
+OUTPUT := ./
+ifeq ("$(origin O)", "command line")
+ifneq ($(O),)
+ OUTPUT := $(O)/
+endif
+endif
-OBJS = main.o parse.o system.o benchmark.o
+LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
+
+OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
CFLAGS += -D_GNU_SOURCE -I../lib -DDEFAULT_CONFIG_FILE=\"$(confdir)/cpufreq-bench.conf\"
-%.o : %.c
+$(OUTPUT)%.o : %.c
$(ECHO) " CC " $@
$(QUIET) $(CC) -c $(CFLAGS) $< -o $@
-cpufreq-bench: $(OBJS)
+$(OUTPUT)cpufreq-bench: $(OBJS)
$(ECHO) " CC " $@
$(QUIET) $(CC) -o $@ $(CFLAGS) $(OBJS) $(LIBS)
-all: cpufreq-bench
+all: $(OUTPUT)cpufreq-bench
install:
mkdir -p $(DESTDIR)/$(sbindir)
mkdir -p $(DESTDIR)/$(bindir)
mkdir -p $(DESTDIR)/$(docdir)
mkdir -p $(DESTDIR)/$(confdir)
- install -m 755 cpufreq-bench $(DESTDIR)/$(sbindir)/cpufreq-bench
+ install -m 755 $(OUTPUT)cpufreq-bench $(DESTDIR)/$(sbindir)/cpufreq-bench
install -m 755 cpufreq-bench_plot.sh $(DESTDIR)/$(bindir)/cpufreq-bench_plot.sh
install -m 644 README-BENCH $(DESTDIR)/$(docdir)/README-BENCH
install -m 755 cpufreq-bench_script.sh $(DESTDIR)/$(docdir)/cpufreq-bench_script.sh
install -m 644 example.cfg $(DESTDIR)/$(confdir)/cpufreq-bench.conf
clean:
- rm -f *.o
- rm -f cpufreq-bench
+ rm -f $(OUTPUT)*.o
+ rm -f $(OUTPUT)cpufreq-bench
diff --git a/tools/power/cpupower/debug/i386/Makefile b/tools/power/cpupower/debug/i386/Makefile
index d08cc1ead9bc..3ba158f0e287 100644
--- a/tools/power/cpupower/debug/i386/Makefile
+++ b/tools/power/cpupower/debug/i386/Makefile
@@ -1,20 +1,38 @@
+OUTPUT=./
+ifeq ("$(origin O)", "command line")
+ OUTPUT := $(O)/
+endif
+
+DESTDIR =
+bindir = /usr/bin
+
+INSTALL = /usr/bin/install
+
+
default: all
-centrino-decode: centrino-decode.c
- $(CC) $(CFLAGS) -o centrino-decode centrino-decode.c
+$(OUTPUT)centrino-decode: centrino-decode.c
+ $(CC) $(CFLAGS) -o $@ centrino-decode.c
-dump_psb: dump_psb.c
- $(CC) $(CFLAGS) -o dump_psb dump_psb.c
+$(OUTPUT)dump_psb: dump_psb.c
+ $(CC) $(CFLAGS) -o $@ dump_psb.c
-intel_gsic: intel_gsic.c
- $(CC) $(CFLAGS) -o intel_gsic -llrmi intel_gsic.c
+$(OUTPUT)intel_gsic: intel_gsic.c
+ $(CC) $(CFLAGS) -o $@ -llrmi intel_gsic.c
-powernow-k8-decode: powernow-k8-decode.c
- $(CC) $(CFLAGS) -o powernow-k8-decode powernow-k8-decode.c
+$(OUTPUT)powernow-k8-decode: powernow-k8-decode.c
+ $(CC) $(CFLAGS) -o $@ powernow-k8-decode.c
-all: centrino-decode dump_psb intel_gsic powernow-k8-decode
+all: $(OUTPUT)centrino-decode $(OUTPUT)dump_psb $(OUTPUT)intel_gsic $(OUTPUT)powernow-k8-decode
clean:
- rm -rf centrino-decode dump_psb intel_gsic powernow-k8-decode
+ rm -rf $(OUTPUT){centrino-decode,dump_psb,intel_gsic,powernow-k8-decode}
+
+install:
+ $(INSTALL) -d $(DESTDIR)${bindir}
+ $(INSTALL) $(OUTPUT)centrino-decode $(DESTDIR)${bindir}
+ $(INSTALL) $(OUTPUT)powernow-k8-decode $(DESTDIR)${bindir}
+ $(INSTALL) $(OUTPUT)dump_psb $(DESTDIR)${bindir}
+ $(INSTALL) $(OUTPUT)intel_gsic $(DESTDIR)${bindir}
-.PHONY: all default clean
+.PHONY: all default clean install
diff --git a/tools/power/cpupower/debug/x86_64/Makefile b/tools/power/cpupower/debug/x86_64/Makefile
index 3326217dd311..1c5214526716 100644
--- a/tools/power/cpupower/debug/x86_64/Makefile
+++ b/tools/power/cpupower/debug/x86_64/Makefile
@@ -1,14 +1,30 @@
+OUTPUT=./
+ifeq ("$(origin O)", "command line")
+ OUTPUT := $(O)/
+endif
+
+DESTDIR =
+bindir = /usr/bin
+
+INSTALL = /usr/bin/install
+
+
default: all
-centrino-decode: ../i386/centrino-decode.c
+$(OUTPUT)centrino-decode: ../i386/centrino-decode.c
$(CC) $(CFLAGS) -o $@ $<
-powernow-k8-decode: ../i386/powernow-k8-decode.c
+$(OUTPUT)powernow-k8-decode: ../i386/powernow-k8-decode.c
$(CC) $(CFLAGS) -o $@ $<
-all: centrino-decode powernow-k8-decode
+all: $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode
clean:
- rm -rf centrino-decode powernow-k8-decode
+ rm -rf $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode
+
+install:
+ $(INSTALL) -d $(DESTDIR)${bindir}
+ $(INSTALL) $(OUTPUT)centrino-decode $(DESTDIR)${bindir}
+ $(INSTALL) $(OUTPUT)powernow-k8-decode $(DESTDIR)${bindir}
-.PHONY: all default clean
+.PHONY: all default clean install
diff --git a/tools/power/cpupower/man/cpupower-frequency-info.1 b/tools/power/cpupower/man/cpupower-frequency-info.1
index bb60a8d1e45a..4a1918ea8f9c 100644
--- a/tools/power/cpupower/man/cpupower-frequency-info.1
+++ b/tools/power/cpupower/man/cpupower-frequency-info.1
@@ -1,4 +1,4 @@
-.TH "cpupower-frequency-info" "1" "0.1" "Mattia Dongili" ""
+.TH "CPUPOWER\-FREQUENCY\-INFO" "1" "0.1" "" "cpupower Manual"
.SH "NAME"
.LP
cpupower frequency\-info \- Utility to retrieve cpufreq kernel information
@@ -50,8 +50,6 @@ Prints out information like provided by the /proc/cpufreq interface in 2.4. and
\fB\-m\fR \fB\-\-human\fR
human\-readable output for the \-f, \-w, \-s and \-y parameters.
.TP
-\fB\-h\fR \fB\-\-help\fR
-Prints out the help screen.
.SH "REMARKS"
.LP
By default only values of core zero are displayed. How to display settings of
diff --git a/tools/power/cpupower/man/cpupower-frequency-set.1 b/tools/power/cpupower/man/cpupower-frequency-set.1
index 685f469093ad..3eacc8d03d1a 100644
--- a/tools/power/cpupower/man/cpupower-frequency-set.1
+++ b/tools/power/cpupower/man/cpupower-frequency-set.1
@@ -1,4 +1,4 @@
-.TH "cpupower-freqency-set" "1" "0.1" "Mattia Dongili" ""
+.TH "CPUPOWER\-FREQUENCY\-SET" "1" "0.1" "" "cpupower Manual"
.SH "NAME"
.LP
cpupower frequency\-set \- A small tool which allows to modify cpufreq settings.
@@ -26,8 +26,6 @@ specific frequency to be set. Requires userspace governor to be available and lo
\fB\-r\fR \fB\-\-related\fR
modify all hardware-related CPUs at the same time
.TP
-\fB\-h\fR \fB\-\-help\fR
-Prints out the help screen.
.SH "REMARKS"
.LP
By default values are applied on all cores. How to modify single core
diff --git a/tools/power/cpupower/man/cpupower-idle-info.1 b/tools/power/cpupower/man/cpupower-idle-info.1
new file mode 100644
index 000000000000..4178effd9e99
--- /dev/null
+++ b/tools/power/cpupower/man/cpupower-idle-info.1
@@ -0,0 +1,90 @@
+.TH "CPUPOWER-IDLE-INFO" "1" "0.1" "" "cpupower Manual"
+.SH "NAME"
+.LP
+cpupower idle\-info \- Utility to retrieve cpu idle kernel information
+.SH "SYNTAX"
+.LP
+cpupower [ \-c cpulist ] idle\-info [\fIoptions\fP]
+.SH "DESCRIPTION"
+.LP
+A tool which prints out per cpu idle information helpful to developers and interested users.
+.SH "OPTIONS"
+.LP
+.TP
+\fB\-f\fR \fB\-\-silent\fR
+Only print a summary of all available C-states in the system.
+.TP
+\fB\-e\fR \fB\-\-proc\fR
+deprecated.
+Prints out idle information in old /proc/acpi/processor/*/power format. This
+interface has been removed from the kernel for quite some time, do not let
+further code depend on this option, best do not use it.
+
+.SH IDLE\-INFO DESCRIPTIONS
+CPU sleep state statistics and descriptions are retrieved from sysfs files,
+exported by the cpuidle kernel subsystem. The kernel only updates these
+statistics when it enters or leaves an idle state, therefore on a very idle or
+a very busy system, these statistics may not be accurate. They still provide a
+good overview about the usage and availability of processor sleep states on
+the platform.
+
+Be aware that the sleep states as exported by the hardware or BIOS and used by
+the Linux kernel may not exactly reflect the capabilities of the
+processor. This often is the case on the X86 architecture when the acpi_idle
+driver is used. It is also possible that the hardware overrules the kernel
+requests, due to internal activity monitors or other reasons.
+On recent X86 platforms it is often possible to read out hardware registers
+which monitor the duration of sleep states the processor resided in. The
+cpupower monitor tool (cpupower\-monitor(1)) can be used to show real sleep
+state residencies. Please refer to the architecture specific description
+section below.
+
+.SH IDLE\-INFO ARCHITECTURE SPECIFIC DESCRIPTIONS
+.SS "X86"
+POLL idle state
+
+If cpuidle is active, X86 platforms have one special idle state.
+The POLL idle state is not a real idle state, it does not save any
+power. Instead, a busy\-loop is executed doing nothing for a short period of
+time. This state is used if the kernel knows that work has to be processed
+very soon and entering any real hardware idle state may result in a slight
+performance penalty.
+
+There exist two different cpuidle drivers on the X86 architecture platform:
+
+"acpi_idle" cpuidle driver
+
+The acpi_idle cpuidle driver retrieves available sleep states (C\-states) from
+the ACPI BIOS tables (from the _CST ACPI function on recent platforms or from
+the FADT BIOS table on older ones).
+The C1 state is not retrieved from ACPI tables. If the C1 state is entered,
+the kernel will call the hlt instruction (or mwait on Intel).
+
+"intel_idle" cpuidle driver
+
+In kernel 2.6.36 the intel_idle driver was introduced.
+It only serves recent Intel CPUs (Nehalem, Westmere, Sandybridge, Atoms or
+newer). On older Intel CPUs the acpi_idle driver is still used (if the BIOS
+provides C\-state ACPI tables).
+The intel_idle driver knows the sleep state capabilities of the processor and
+ignores ACPI BIOS exported processor sleep states tables.
+
+.SH "REMARKS"
+.LP
+By default only values of core zero are displayed. How to display settings of
+other cores is described in the cpupower(1) manpage in the \-\-cpu option
+section.
+.SH REFERENCES
+http://www.acpi.info/spec.htm
+.SH "FILES"
+.nf
+\fI/sys/devices/system/cpu/cpu*/cpuidle/state*\fP
+\fI/sys/devices/system/cpu/cpuidle/*\fP
+.fi
+.SH "AUTHORS"
+.nf
+Thomas Renninger <trenn@suse.de>
+.fi
+.SH "SEE ALSO"
+.LP
+cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1)
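
Commentary, not part of the patch: the new man page above explains that idle-info reads its statistics from sysfs files under /sys/devices/system/cpu/cpu*/cpuidle/state*. A minimal sketch that walks those files by hand for cpu0; it assumes the standard cpuidle attributes "name" and "usage" are present.

    #include <stdio.h>

    int main(void)
    {
        char path[128], buf[64];

        for (int state = 0; ; state++) {
            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu0/cpuidle/state%d/name", state);
            FILE *f = fopen(path, "r");
            if (!f)
                break;                              /* no more states */
            if (fgets(buf, sizeof(buf), f))
                printf("state%d: %s", state, buf);  /* buf keeps its newline */
            fclose(f);

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu0/cpuidle/state%d/usage", state);
            f = fopen(path, "r");
            if (f) {
                if (fgets(buf, sizeof(buf), f))
                    printf("  entered %s", buf);    /* times this state was entered */
                fclose(f);
            }
        }
        return 0;
    }
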
diff --git a/tools/power/cpupower/man/cpupower-monitor.1 b/tools/power/cpupower/man/cpupower-monitor.1
index d5cfa265c3d3..1141c2073719 100644
--- a/tools/power/cpupower/man/cpupower-monitor.1
+++ b/tools/power/cpupower/man/cpupower-monitor.1
@@ -107,7 +107,7 @@ Deepest package sleep states may in reality show up as machine/platform wide
sleep states and can only be entered if all cores are idle. Look up Intel
manuals (some are provided in the References section) for further details.
-.SS "Ontario" "Liano"
+.SS "Fam_12h" "Fam_14h"
AMD laptop and desktop processor (family 12h and 14h) sleep state counters.
The registers are accessed via PCI and therefore can still be read out while
cores have been offlined.
diff --git a/tools/power/cpupower/utils/cpuidle-info.c b/tools/power/cpupower/utils/cpuidle-info.c
index b028267c1376..8145af5f93a6 100644
--- a/tools/power/cpupower/utils/cpuidle-info.c
+++ b/tools/power/cpupower/utils/cpuidle-info.c
@@ -35,17 +35,9 @@ static void cpuidle_cpu_output(unsigned int cpu, int verbose)
printf(_("CPU %u: Can't read idle state info\n"), cpu);
return;
}
- tmp = sysfs_get_idlestate_name(cpu, idlestates - 1);
- if (!tmp) {
- printf(_("Could not determine max idle state %u\n"),
- idlestates - 1);
- return;
- }
-
printf(_("Number of idle states: %d\n"), idlestates);
-
printf(_("Available idle states:"));
- for (idlestate = 1; idlestate < idlestates; idlestate++) {
+ for (idlestate = 0; idlestate < idlestates; idlestate++) {
tmp = sysfs_get_idlestate_name(cpu, idlestate);
if (!tmp)
continue;
@@ -57,7 +49,7 @@ static void cpuidle_cpu_output(unsigned int cpu, int verbose)
if (!verbose)
return;
- for (idlestate = 1; idlestate < idlestates; idlestate++) {
+ for (idlestate = 0; idlestate < idlestates; idlestate++) {
tmp = sysfs_get_idlestate_name(cpu, idlestate);
if (!tmp)
continue;
diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
index 87d5605bdda8..6437ef39aeea 100644
--- a/tools/power/cpupower/utils/helpers/amd.c
+++ b/tools/power/cpupower/utils/helpers/amd.c
@@ -112,14 +112,12 @@ int decode_pstates(unsigned int cpu, unsigned int cpu_family,
int amd_pci_get_num_boost_states(int *active, int *states)
{
struct pci_access *pci_acc;
- int vendor_id = 0x1022;
- int boost_dev_ids[4] = {0x1204, 0x1604, 0x1704, 0};
struct pci_dev *device;
uint8_t val = 0;
*active = *states = 0;
- device = pci_acc_init(&pci_acc, vendor_id, boost_dev_ids);
+ device = pci_slot_func_init(&pci_acc, 0x18, 4);
if (device == NULL)
return -ENODEV;
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index 2747e738efb0..2eb584cf2f55 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -66,8 +66,8 @@ enum cpupower_cpu_vendor {X86_VENDOR_UNKNOWN = 0, X86_VENDOR_INTEL,
#define CPUPOWER_CAP_AMD_CBP 0x00000004
#define CPUPOWER_CAP_PERF_BIAS 0x00000008
#define CPUPOWER_CAP_HAS_TURBO_RATIO 0x00000010
-#define CPUPOWER_CAP_IS_SNB 0x00000011
-#define CPUPOWER_CAP_INTEL_IDA 0x00000012
+#define CPUPOWER_CAP_IS_SNB 0x00000020
+#define CPUPOWER_CAP_INTEL_IDA 0x00000040
#define MAX_HW_PSTATES 10
@@ -132,8 +132,11 @@ extern unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu);
/* PCI stuff ****************************/
extern int amd_pci_get_num_boost_states(int *active, int *states);
-extern struct pci_dev *pci_acc_init(struct pci_access **pacc, int vendor_id,
- int *dev_ids);
+extern struct pci_dev *pci_acc_init(struct pci_access **pacc, int domain,
+ int bus, int slot, int func, int vendor,
+ int dev);
+extern struct pci_dev *pci_slot_func_init(struct pci_access **pacc,
+ int slot, int func);
/* PCI stuff ****************************/
diff --git a/tools/power/cpupower/utils/helpers/pci.c b/tools/power/cpupower/utils/helpers/pci.c
index cd2eb6fe41c4..9690798e6446 100644
--- a/tools/power/cpupower/utils/helpers/pci.c
+++ b/tools/power/cpupower/utils/helpers/pci.c
@@ -10,19 +10,24 @@
* **pacc : if a valid pci_dev is returned
* *pacc must be passed to pci_acc_cleanup to free it
*
- * vendor_id : the pci vendor id matching the pci device to access
- * dev_ids : device ids matching the pci device to access
+ * domain: PCI domain to match
+ * bus: PCI bus number to match
+ * slot: device (slot) number to match
+ * func: PCI function to match
+ * vendor: PCI vendor id to match
+ * device: PCI device id to match
+ * Pass -1 for any of the six fields above to match all values
*
* Returns :
* struct pci_dev which can be used with pci_{read,write}_* functions
* to access the PCI config space of matching pci devices
*/
-struct pci_dev *pci_acc_init(struct pci_access **pacc, int vendor_id,
- int *dev_ids)
+struct pci_dev *pci_acc_init(struct pci_access **pacc, int domain, int bus,
+ int slot, int func, int vendor, int dev)
{
- struct pci_filter filter_nb_link = { -1, -1, -1, -1, vendor_id, 0};
+ struct pci_filter filter_nb_link = { domain, bus, slot, func,
+ vendor, dev };
struct pci_dev *device;
- unsigned int i;
*pacc = pci_alloc();
if (*pacc == NULL)
@@ -31,14 +36,20 @@ struct pci_dev *pci_acc_init(struct pci_access **pacc, int vendor_id,
pci_init(*pacc);
pci_scan_bus(*pacc);
- for (i = 0; dev_ids[i] != 0; i++) {
- filter_nb_link.device = dev_ids[i];
- for (device = (*pacc)->devices; device; device = device->next) {
- if (pci_filter_match(&filter_nb_link, device))
- return device;
- }
+ for (device = (*pacc)->devices; device; device = device->next) {
+ if (pci_filter_match(&filter_nb_link, device))
+ return device;
}
pci_cleanup(*pacc);
return NULL;
}
+
+/* Typically one wants to access a specific slot (device)/function on the
+   root domain and bus */
+struct pci_dev *pci_slot_func_init(struct pci_access **pacc, int slot,
+ int func)
+{
+ return pci_acc_init(pacc, 0, 0, slot, func, -1, -1);
+}
+
#endif /* defined(__i386__) || defined(__x86_64__) */
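
With the rework above, callers can either address a device by its fixed location or keep matching by vendor/device id, since any of the six filter fields may be -1. The fragment below is an illustrative caller only, assuming the helpers declared in helpers.h above; 0x1022/0x1716 are the vendor/device ids removed from amd_fam14h_idle.c later in this patch.

/*
 * Sketch of the two addressing styles offered by the reworked helpers
 * (fragment; relies on helpers.h and libpci, error handling trimmed).
 */
static int read_nb_byte(int offset, uint8_t *val)
{
	struct pci_access *pacc;
	struct pci_dev *dev;

	/* new common case: fixed slot/function on domain 0, bus 0 */
	dev = pci_slot_func_init(&pacc, 0x18, 4);

	/* equivalent vendor/device match, location wildcarded with -1: */
	/* dev = pci_acc_init(&pacc, -1, -1, -1, -1, 0x1022, 0x1716); */

	if (dev == NULL)
		return -ENODEV;
	*val = pci_read_byte(dev, offset);
	pci_cleanup(pacc);
	return 0;
}
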
diff --git a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
index 202e555988be..2116df9ad832 100644
--- a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
@@ -20,8 +20,6 @@
#include "idle_monitor/cpupower-monitor.h"
#include "helpers/helpers.h"
-/******** PCI parts could go into own file and get shared ***************/
-
#define PCI_NON_PC0_OFFSET 0xb0
#define PCI_PC1_OFFSET 0xb4
#define PCI_PC6_OFFSET 0xb8
@@ -82,10 +80,7 @@ static cstate_t amd_fam14h_cstates[AMD_FAM14H_STATE_NUM] = {
};
static struct pci_access *pci_acc;
-static int pci_vendor_id = 0x1022;
-static int pci_dev_ids[2] = {0x1716, 0};
static struct pci_dev *amd_fam14h_pci_dev;
-
static int nbp1_entered;
struct timespec start_time;
@@ -286,13 +281,13 @@ struct cpuidle_monitor *amd_fam14h_register(void)
if (cpupower_cpu_info.vendor != X86_VENDOR_AMD)
return NULL;
- if (cpupower_cpu_info.family == 0x14) {
- if (cpu_count <= 0 || cpu_count > 2) {
- fprintf(stderr, "AMD fam14h: Invalid cpu count: %d\n",
- cpu_count);
- return NULL;
- }
- } else
+ if (cpupower_cpu_info.family == 0x14)
+ strncpy(amd_fam14h_monitor.name, "Fam_14h",
+ MONITOR_NAME_LEN - 1);
+ else if (cpupower_cpu_info.family == 0x12)
+ strncpy(amd_fam14h_monitor.name, "Fam_12h",
+ MONITOR_NAME_LEN - 1);
+ else
return NULL;
/* We do not alloc for nbp1 machine wide counter */
@@ -303,7 +298,9 @@ struct cpuidle_monitor *amd_fam14h_register(void)
sizeof(unsigned long long));
}
- amd_fam14h_pci_dev = pci_acc_init(&pci_acc, pci_vendor_id, pci_dev_ids);
+ /* We need PCI device: Slot 18, Func 6, compare with BKDG
+ for fam 12h/14h */
+ amd_fam14h_pci_dev = pci_slot_func_init(&pci_acc, 0x18, 6);
if (amd_fam14h_pci_dev == NULL || pci_acc == NULL)
return NULL;
@@ -325,7 +322,7 @@ static void amd_fam14h_unregister(void)
}
struct cpuidle_monitor amd_fam14h_monitor = {
- .name = "Ontario",
+ .name = "",
.hw_states = amd_fam14h_cstates,
.hw_states_num = AMD_FAM14H_STATE_NUM,
.start = amd_fam14h_start,
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index 555c69a5592a..adf175f61496 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -4,11 +4,13 @@ turbostat \- Report processor frequency and idle statistics
.SH SYNOPSIS
.ft B
.B turbostat
+.RB [ "\-s" ]
.RB [ "\-v" ]
.RB [ "\-M MSR#" ]
.RB command
.br
.B turbostat
+.RB [ "\-s" ]
.RB [ "\-v" ]
.RB [ "\-M MSR#" ]
.RB [ "\-i interval_sec" ]
@@ -25,6 +27,8 @@ supports an "invariant" TSC, plus the APERF and MPERF MSRs.
on processors that additionally support C-state residency counters.
.SS Options
+The \fB-s\fP option prints only a 1-line summary for each sample interval.
+.PP
The \fB-v\fP option increases verbosity.
.PP
The \fB-M MSR#\fP option dumps the specified MSR,
@@ -39,13 +43,14 @@ displays the statistics gathered since it was forked.
.SH FIELD DESCRIPTIONS
.nf
\fBpk\fP processor package number.
-\fBcr\fP processor core number.
+\fBcor\fP processor core number.
\fBCPU\fP Linux CPU (logical processor) number.
+Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology.
\fB%c0\fP percent of the interval that the CPU retired instructions.
\fBGHz\fP average clock rate while the CPU was in c0 state.
\fBTSC\fP average GHz that the TSC ran during the entire interval.
-\fB%c1, %c3, %c6\fP show the percentage residency in hardware core idle states.
-\fB%pc3, %pc6\fP percentage residency in hardware package idle states.
+\fB%c1, %c3, %c6, %c7\fP show the percentage residency in hardware core idle states.
+\fB%pc2, %pc3, %pc6, %pc7\fP percentage residency in hardware package idle states.
.fi
.PP
.SH EXAMPLE
@@ -53,25 +58,37 @@ Without any parameters, turbostat prints out counters every 5 seconds.
(override interval with "-i sec" option, or specify a command
for turbostat to fork).
-The first row of statistics reflect the average for the entire system.
+The first row of statistics is a summary for the entire system.
+Note that the summary is a weighted average.
Subsequent rows show per-CPU statistics.
.nf
[root@x980]# ./turbostat
-cr CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
- 0.04 1.62 3.38 0.11 0.00 99.85 0.00 95.07
- 0 0 0.04 1.62 3.38 0.06 0.00 99.90 0.00 95.07
- 0 6 0.02 1.62 3.38 0.08 0.00 99.90 0.00 95.07
- 1 2 0.10 1.62 3.38 0.29 0.00 99.61 0.00 95.07
- 1 8 0.11 1.62 3.38 0.28 0.00 99.61 0.00 95.07
- 2 4 0.01 1.62 3.38 0.01 0.00 99.98 0.00 95.07
- 2 10 0.01 1.61 3.38 0.02 0.00 99.98 0.00 95.07
- 8 1 0.07 1.62 3.38 0.15 0.00 99.78 0.00 95.07
- 8 7 0.03 1.62 3.38 0.19 0.00 99.78 0.00 95.07
- 9 3 0.01 1.62 3.38 0.02 0.00 99.98 0.00 95.07
- 9 9 0.01 1.62 3.38 0.02 0.00 99.98 0.00 95.07
- 10 5 0.01 1.62 3.38 0.13 0.00 99.86 0.00 95.07
- 10 11 0.08 1.62 3.38 0.05 0.00 99.86 0.00 95.07
+cor CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
+ 0.60 1.63 3.38 2.91 0.00 96.49 0.00 76.64
+ 0 0 0.59 1.62 3.38 4.51 0.00 94.90 0.00 76.64
+ 0 6 1.13 1.64 3.38 3.97 0.00 94.90 0.00 76.64
+ 1 2 0.08 1.62 3.38 0.07 0.00 99.85 0.00 76.64
+ 1 8 0.03 1.62 3.38 0.12 0.00 99.85 0.00 76.64
+ 2 4 0.01 1.62 3.38 0.06 0.00 99.93 0.00 76.64
+ 2 10 0.04 1.62 3.38 0.02 0.00 99.93 0.00 76.64
+ 8 1 2.85 1.62 3.38 11.71 0.00 85.44 0.00 76.64
+ 8 7 1.98 1.62 3.38 12.58 0.00 85.44 0.00 76.64
+ 9 3 0.36 1.62 3.38 0.71 0.00 98.93 0.00 76.64
+ 9 9 0.09 1.62 3.38 0.98 0.00 98.93 0.00 76.64
+ 10 5 0.03 1.62 3.38 0.09 0.00 99.87 0.00 76.64
+ 10 11 0.07 1.62 3.38 0.06 0.00 99.87 0.00 76.64
+.fi
+.SH SUMMARY EXAMPLE
+The "-s" option prints the column headers just once,
+and then a one-line system summary for each sample interval.
+
+.nf
+[root@x980]# ./turbostat -s
+ %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
+ 0.61 1.89 3.38 5.95 0.00 93.44 0.00 66.33
+ 0.52 1.62 3.38 6.83 0.00 92.65 0.00 61.11
+ 0.62 1.92 3.38 5.47 0.00 93.91 0.00 67.31
.fi
.SH VERBOSE EXAMPLE
The "-v" option adds verbosity to the output:
@@ -101,33 +118,33 @@ until ^C while the other CPUs are mostly idle:
.nf
[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
-
-^Ccr CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
- 8.49 3.63 3.38 16.23 0.66 74.63 0.00 0.00
- 0 0 1.22 3.62 3.38 32.18 0.00 66.60 0.00 0.00
- 0 6 0.40 3.61 3.38 33.00 0.00 66.60 0.00 0.00
- 1 2 0.11 3.14 3.38 0.19 3.95 95.75 0.00 0.00
- 1 8 0.05 2.88 3.38 0.25 3.95 95.75 0.00 0.00
- 2 4 0.00 3.13 3.38 0.02 0.00 99.98 0.00 0.00
- 2 10 0.00 3.09 3.38 0.02 0.00 99.98 0.00 0.00
- 8 1 0.04 3.50 3.38 14.43 0.00 85.54 0.00 0.00
- 8 7 0.03 2.98 3.38 14.43 0.00 85.54 0.00 0.00
- 9 3 0.00 3.16 3.38 100.00 0.00 0.00 0.00 0.00
- 9 9 99.93 3.63 3.38 0.06 0.00 0.00 0.00 0.00
- 10 5 0.01 2.82 3.38 0.08 0.00 99.91 0.00 0.00
- 10 11 0.02 3.36 3.38 0.06 0.00 99.91 0.00 0.00
-6.950866 sec
+^C
+cor CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
+ 8.63 3.64 3.38 14.46 0.49 76.42 0.00 0.00
+ 0 0 0.34 3.36 3.38 99.66 0.00 0.00 0.00 0.00
+ 0 6 99.96 3.64 3.38 0.04 0.00 0.00 0.00 0.00
+ 1 2 0.14 3.50 3.38 1.75 2.04 96.07 0.00 0.00
+ 1 8 0.38 3.57 3.38 1.51 2.04 96.07 0.00 0.00
+ 2 4 0.01 2.65 3.38 0.06 0.00 99.93 0.00 0.00
+ 2 10 0.03 2.12 3.38 0.04 0.00 99.93 0.00 0.00
+ 8 1 0.91 3.59 3.38 35.27 0.92 62.90 0.00 0.00
+ 8 7 1.61 3.63 3.38 34.57 0.92 62.90 0.00 0.00
+ 9 3 0.04 3.38 3.38 0.20 0.00 99.76 0.00 0.00
+ 9 9 0.04 3.29 3.38 0.20 0.00 99.76 0.00 0.00
+ 10 5 0.03 3.08 3.38 0.12 0.00 99.85 0.00 0.00
+ 10 11 0.05 3.07 3.38 0.10 0.00 99.85 0.00 0.00
+4.907015 sec
.fi
-Above the cycle soaker drives cpu9 up 3.6 Ghz turbo limit
+Above the cycle soaker drives cpu6 up to the 3.6 GHz turbo limit
while the other processors are generally in various states of idle.
-Note that cpu3 is an HT sibling sharing core9
-with cpu9, and thus it is unable to get to an idle state
-deeper than c1 while cpu9 is busy.
+Note that cpu0 is an HT sibling sharing core0
+with cpu6, and thus it is unable to get to an idle state
+deeper than c1 while cpu6 is busy.
-Note that turbostat reports average GHz of 3.61, while
-the arithmetic average of the GHz column above is 3.24.
+Note that turbostat reports average GHz of 3.64, while
+the arithmetic average of the GHz column above is lower.
This is a weighted average, where the weight is %c0; i.e., it is the total number of
un-halted cycles elapsed per time divided by the number of CPUs.
.SH NOTES
@@ -167,6 +184,6 @@ http://www.intel.com/products/processor/manuals/
.SH "SEE ALSO"
msr(4), vmstat(8)
.PP
-.SH AUTHORS
+.SH AUTHOR
.nf
Written by Len Brown <len.brown@intel.com>
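
To make the weighted-average note in the man page above concrete: each CPU's clock rate is weighted by the time it spent in C0, so one busy CPU running near the turbo limit dominates the summary row even when the arithmetic mean of the GHz column is much lower. The sketch below is illustrative only; turbostat itself derives the value from summed APERF/MPERF/TSC deltas, which amounts to the same weighting.

/*
 * Weight each CPU's GHz by its %c0 (busy) time; numbers loosely based
 * on the example output above.
 */
#include <stdio.h>

int main(void)
{
	double c0[]  = { 0.34, 99.96, 0.14, 0.38 };	/* %c0 per CPU */
	double ghz[] = { 3.36,  3.64, 3.50, 3.57 };	/* GHz per CPU */
	double wsum = 0.0, w = 0.0;
	unsigned int i;

	for (i = 0; i < sizeof(c0) / sizeof(c0[0]); i++) {
		wsum += c0[i] * ghz[i];
		w += c0[i];
	}
	printf("weighted GHz: %.2f\n", wsum / w);	/* ~3.64 */
	return 0;
}
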
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 310d3dd5e547..ab2f682fd44c 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2,7 +2,7 @@
* turbostat -- show CPU frequency and C-state residency
* on modern Intel turbo-capable processors.
*
- * Copyright (c) 2010, Intel Corporation.
+ * Copyright (c) 2012 Intel Corporation.
* Len Brown <len.brown@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -19,6 +19,7 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
@@ -32,6 +33,7 @@
#include <dirent.h>
#include <string.h>
#include <ctype.h>
+#include <sched.h>
#define MSR_TSC 0x10
#define MSR_NEHALEM_PLATFORM_INFO 0xCE
@@ -49,6 +51,7 @@
char *proc_stat = "/proc/stat";
unsigned int interval_sec = 5; /* set with -i interval_sec */
unsigned int verbose; /* set with -v */
+unsigned int summary_only; /* set with -s */
unsigned int skip_c0;
unsigned int skip_c1;
unsigned int do_nhm_cstates;
@@ -68,9 +71,10 @@ unsigned int show_cpu;
int aperf_mperf_unstable;
int backwards_count;
char *progname;
-int need_reinitialize;
int num_cpus;
+cpu_set_t *cpu_mask;
+size_t cpu_mask_size;
struct counters {
unsigned long long tsc; /* per thread */
@@ -99,44 +103,76 @@ struct timeval tv_even;
struct timeval tv_odd;
struct timeval tv_delta;
-unsigned long long get_msr(int cpu, off_t offset)
+/*
+ * cpu_mask_init(ncpus)
+ *
+ * allocate and clear cpu_mask
+ * set cpu_mask_size
+ */
+void cpu_mask_init(int ncpus)
+{
+ cpu_mask = CPU_ALLOC(ncpus);
+ if (cpu_mask == NULL) {
+ perror("CPU_ALLOC");
+ exit(3);
+ }
+ cpu_mask_size = CPU_ALLOC_SIZE(ncpus);
+ CPU_ZERO_S(cpu_mask_size, cpu_mask);
+}
+
+void cpu_mask_uninit()
+{
+ CPU_FREE(cpu_mask);
+ cpu_mask = NULL;
+ cpu_mask_size = 0;
+}
+
+int cpu_migrate(int cpu)
+{
+ CPU_ZERO_S(cpu_mask_size, cpu_mask);
+ CPU_SET_S(cpu, cpu_mask_size, cpu_mask);
+ if (sched_setaffinity(0, cpu_mask_size, cpu_mask) == -1)
+ return -1;
+ else
+ return 0;
+}
+
+int get_msr(int cpu, off_t offset, unsigned long long *msr)
{
ssize_t retval;
- unsigned long long msr;
char pathname[32];
int fd;
sprintf(pathname, "/dev/cpu/%d/msr", cpu);
fd = open(pathname, O_RDONLY);
- if (fd < 0) {
- perror(pathname);
- need_reinitialize = 1;
- return 0;
- }
-
- retval = pread(fd, &msr, sizeof msr, offset);
- if (retval != sizeof msr) {
- fprintf(stderr, "cpu%d pread(..., 0x%zx) = %jd\n",
- cpu, offset, retval);
- exit(-2);
- }
+ if (fd < 0)
+ return -1;
+ retval = pread(fd, msr, sizeof *msr, offset);
close(fd);
- return msr;
+
+ if (retval != sizeof *msr)
+ return -1;
+
+ return 0;
}
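
The error-handling contract changes here: get_msr() now reports failure through its return value instead of exiting, and callers are expected to pin themselves to the target CPU first so the /dev/cpu/N/msr read cannot race with hotplug. A caller following the new convention might look like the fragment below (a sketch; cpu_migrate(), get_msr() and MSR_TSC are the ones defined in this file).

/*
 * Sketch of the new calling convention: migrate, then read; a non-zero
 * return means the CPU went away (or the msr driver is unavailable)
 * and the caller should re-initialize rather than abort.
 */
static int sample_tsc(int cpu, unsigned long long *tsc)
{
	if (cpu_migrate(cpu))
		return -1;		/* CPU offlined under us */
	if (get_msr(cpu, MSR_TSC, tsc))
		return -1;		/* /dev/cpu/N/msr not readable */
	return 0;
}
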
void print_header(void)
{
if (show_pkg)
fprintf(stderr, "pk");
+ if (show_pkg)
+ fprintf(stderr, " ");
if (show_core)
- fprintf(stderr, " cr");
+ fprintf(stderr, "cor");
if (show_cpu)
fprintf(stderr, " CPU");
+ if (show_pkg || show_core || show_cpu)
+ fprintf(stderr, " ");
if (do_nhm_cstates)
- fprintf(stderr, " %%c0 ");
+ fprintf(stderr, " %%c0");
if (has_aperf)
- fprintf(stderr, " GHz");
+ fprintf(stderr, " GHz");
fprintf(stderr, " TSC");
if (do_nhm_cstates)
fprintf(stderr, " %%c1");
@@ -147,13 +183,13 @@ void print_header(void)
if (do_snb_cstates)
fprintf(stderr, " %%c7");
if (do_snb_cstates)
- fprintf(stderr, " %%pc2");
+ fprintf(stderr, " %%pc2");
if (do_nhm_cstates)
- fprintf(stderr, " %%pc3");
+ fprintf(stderr, " %%pc3");
if (do_nhm_cstates)
- fprintf(stderr, " %%pc6");
+ fprintf(stderr, " %%pc6");
if (do_snb_cstates)
- fprintf(stderr, " %%pc7");
+ fprintf(stderr, " %%pc7");
if (extra_msr_offset)
fprintf(stderr, " MSR 0x%x ", extra_msr_offset);
@@ -187,6 +223,15 @@ void dump_list(struct counters *cnt)
dump_cnt(cnt);
}
+/*
+ * column formatting convention & formats
+ * package: "pk" 2 columns %2d
+ * core: "cor" 3 columns %3d
+ * CPU: "CPU" 3 columns %3d
+ * GHz: "GHz" 3 columns %3.2f
+ * TSC: "TSC" 3 columns %3.2f
+ * percentage " %pc3" %6.2f
+ */
void print_cnt(struct counters *p)
{
double interval_float;
@@ -196,39 +241,45 @@ void print_cnt(struct counters *p)
/* topology columns, print blanks on 1st (average) line */
if (p == cnt_average) {
if (show_pkg)
+ fprintf(stderr, " ");
+ if (show_pkg && show_core)
fprintf(stderr, " ");
if (show_core)
- fprintf(stderr, " ");
+ fprintf(stderr, " ");
if (show_cpu)
- fprintf(stderr, " ");
+ fprintf(stderr, " " " ");
} else {
if (show_pkg)
- fprintf(stderr, "%d", p->pkg);
+ fprintf(stderr, "%2d", p->pkg);
+ if (show_pkg && show_core)
+ fprintf(stderr, " ");
if (show_core)
- fprintf(stderr, "%4d", p->core);
+ fprintf(stderr, "%3d", p->core);
if (show_cpu)
- fprintf(stderr, "%4d", p->cpu);
+ fprintf(stderr, " %3d", p->cpu);
}
/* %c0 */
if (do_nhm_cstates) {
+ if (show_pkg || show_core || show_cpu)
+ fprintf(stderr, " ");
if (!skip_c0)
- fprintf(stderr, "%7.2f", 100.0 * p->mperf/p->tsc);
+ fprintf(stderr, "%6.2f", 100.0 * p->mperf/p->tsc);
else
- fprintf(stderr, " ****");
+ fprintf(stderr, " ****");
}
/* GHz */
if (has_aperf) {
if (!aperf_mperf_unstable) {
- fprintf(stderr, "%5.2f",
+ fprintf(stderr, " %3.2f",
1.0 * p->tsc / units * p->aperf /
p->mperf / interval_float);
} else {
if (p->aperf > p->tsc || p->mperf > p->tsc) {
- fprintf(stderr, " ****");
+ fprintf(stderr, " ***");
} else {
- fprintf(stderr, "%4.1f*",
+ fprintf(stderr, "%3.1f*",
1.0 * p->tsc /
units * p->aperf /
p->mperf / interval_float);
@@ -241,7 +292,7 @@ void print_cnt(struct counters *p)
if (do_nhm_cstates) {
if (!skip_c1)
- fprintf(stderr, "%7.2f", 100.0 * p->c1/p->tsc);
+ fprintf(stderr, " %6.2f", 100.0 * p->c1/p->tsc);
else
fprintf(stderr, " ****");
}
@@ -252,13 +303,13 @@ void print_cnt(struct counters *p)
if (do_snb_cstates)
fprintf(stderr, " %6.2f", 100.0 * p->c7/p->tsc);
if (do_snb_cstates)
- fprintf(stderr, " %5.2f", 100.0 * p->pc2/p->tsc);
+ fprintf(stderr, " %6.2f", 100.0 * p->pc2/p->tsc);
if (do_nhm_cstates)
- fprintf(stderr, " %5.2f", 100.0 * p->pc3/p->tsc);
+ fprintf(stderr, " %6.2f", 100.0 * p->pc3/p->tsc);
if (do_nhm_cstates)
- fprintf(stderr, " %5.2f", 100.0 * p->pc6/p->tsc);
+ fprintf(stderr, " %6.2f", 100.0 * p->pc6/p->tsc);
if (do_snb_cstates)
- fprintf(stderr, " %5.2f", 100.0 * p->pc7/p->tsc);
+ fprintf(stderr, " %6.2f", 100.0 * p->pc7/p->tsc);
if (extra_msr_offset)
fprintf(stderr, " 0x%016llx", p->extra_msr);
putc('\n', stderr);
@@ -267,12 +318,20 @@ void print_cnt(struct counters *p)
void print_counters(struct counters *counters)
{
struct counters *cnt;
+ static int printed;
+
- print_header();
+ if (!printed || !summary_only)
+ print_header();
if (num_cpus > 1)
print_cnt(cnt_average);
+ printed = 1;
+
+ if (summary_only)
+ return;
+
for (cnt = counters; cnt != NULL; cnt = cnt->next)
print_cnt(cnt);
@@ -440,31 +499,51 @@ void compute_average(struct counters *delta, struct counters *avg)
free(sum);
}
-void get_counters(struct counters *cnt)
+int get_counters(struct counters *cnt)
{
for ( ; cnt; cnt = cnt->next) {
- cnt->tsc = get_msr(cnt->cpu, MSR_TSC);
- if (do_nhm_cstates)
- cnt->c3 = get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY);
- if (do_nhm_cstates)
- cnt->c6 = get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY);
- if (do_snb_cstates)
- cnt->c7 = get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY);
- if (has_aperf)
- cnt->aperf = get_msr(cnt->cpu, MSR_APERF);
- if (has_aperf)
- cnt->mperf = get_msr(cnt->cpu, MSR_MPERF);
- if (do_snb_cstates)
- cnt->pc2 = get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY);
- if (do_nhm_cstates)
- cnt->pc3 = get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY);
- if (do_nhm_cstates)
- cnt->pc6 = get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY);
+
+ if (cpu_migrate(cnt->cpu))
+ return -1;
+
+ if (get_msr(cnt->cpu, MSR_TSC, &cnt->tsc))
+ return -1;
+
+ if (has_aperf) {
+ if (get_msr(cnt->cpu, MSR_APERF, &cnt->aperf))
+ return -1;
+ if (get_msr(cnt->cpu, MSR_MPERF, &cnt->mperf))
+ return -1;
+ }
+
+ if (do_nhm_cstates) {
+ if (get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY, &cnt->c3))
+ return -1;
+ if (get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY, &cnt->c6))
+ return -1;
+ }
+
if (do_snb_cstates)
- cnt->pc7 = get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY);
+ if (get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY, &cnt->c7))
+ return -1;
+
+ if (do_nhm_cstates) {
+ if (get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY, &cnt->pc3))
+ return -1;
+ if (get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY, &cnt->pc6))
+ return -1;
+ }
+ if (do_snb_cstates) {
+ if (get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY, &cnt->pc2))
+ return -1;
+ if (get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY, &cnt->pc7))
+ return -1;
+ }
if (extra_msr_offset)
- cnt->extra_msr = get_msr(cnt->cpu, extra_msr_offset);
+ if (get_msr(cnt->cpu, extra_msr_offset, &cnt->extra_msr))
+ return -1;
}
+ return 0;
}
void print_nehalem_info(void)
@@ -475,7 +554,7 @@ void print_nehalem_info(void)
if (!do_nehalem_platform_info)
return;
- msr = get_msr(0, MSR_NEHALEM_PLATFORM_INFO);
+ get_msr(0, MSR_NEHALEM_PLATFORM_INFO, &msr);
ratio = (msr >> 40) & 0xFF;
fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
@@ -491,7 +570,7 @@ void print_nehalem_info(void)
if (!do_nehalem_turbo_ratio_limit)
return;
- msr = get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT);
+ get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT, &msr);
ratio = (msr >> 24) & 0xFF;
if (ratio)
@@ -557,7 +636,8 @@ void insert_counters(struct counters **list,
return;
}
- show_cpu = 1; /* there is more than one CPU */
+ if (!summary_only)
+ show_cpu = 1; /* there is more than one CPU */
/*
* insert on front of list.
@@ -575,13 +655,15 @@ void insert_counters(struct counters **list,
while (prev->next && (prev->next->pkg < new->pkg)) {
prev = prev->next;
- show_pkg = 1; /* there is more than 1 package */
+ if (!summary_only)
+ show_pkg = 1; /* there is more than 1 package */
}
while (prev->next && (prev->next->pkg == new->pkg)
&& (prev->next->core < new->core)) {
prev = prev->next;
- show_core = 1; /* there is more than 1 core */
+ if (!summary_only)
+ show_core = 1; /* there is more than 1 core */
}
while (prev->next && (prev->next->pkg == new->pkg)
@@ -681,7 +763,7 @@ int get_core_id(int cpu)
}
/*
- * run func(index, cpu) on every cpu in /proc/stat
+ * run func(pkg, core, cpu) on every cpu in /proc/stat
*/
int for_all_cpus(void (func)(int, int, int))
@@ -717,18 +799,18 @@ int for_all_cpus(void (func)(int, int, int))
void re_initialize(void)
{
- printf("turbostat: topology changed, re-initializing.\n");
free_all_counters();
num_cpus = for_all_cpus(alloc_new_counters);
- need_reinitialize = 0;
- printf("num_cpus is now %d\n", num_cpus);
+ cpu_mask_uninit();
+ cpu_mask_init(num_cpus);
+ printf("turbostat: re-initialized with num_cpus %d\n", num_cpus);
}
void dummy(int pkg, int core, int cpu) { return; }
/*
* check to see if a cpu came on-line
*/
-void verify_num_cpus(void)
+int verify_num_cpus(void)
{
int new_num_cpus;
@@ -738,8 +820,9 @@ void verify_num_cpus(void)
if (verbose)
printf("num_cpus was %d, is now %d\n",
num_cpus, new_num_cpus);
- need_reinitialize = 1;
+ return -1;
}
+ return 0;
}
void turbostat_loop()
@@ -749,25 +832,25 @@ restart:
gettimeofday(&tv_even, (struct timezone *)NULL);
while (1) {
- verify_num_cpus();
- if (need_reinitialize) {
+ if (verify_num_cpus()) {
re_initialize();
goto restart;
}
sleep(interval_sec);
- get_counters(cnt_odd);
+ if (get_counters(cnt_odd)) {
+ re_initialize();
+ goto restart;
+ }
gettimeofday(&tv_odd, (struct timezone *)NULL);
-
compute_delta(cnt_odd, cnt_even, cnt_delta);
timersub(&tv_odd, &tv_even, &tv_delta);
compute_average(cnt_delta, cnt_average);
print_counters(cnt_delta);
- if (need_reinitialize) {
+ sleep(interval_sec);
+ if (get_counters(cnt_even)) {
re_initialize();
goto restart;
}
- sleep(interval_sec);
- get_counters(cnt_even);
gettimeofday(&tv_even, (struct timezone *)NULL);
compute_delta(cnt_even, cnt_odd, cnt_delta);
timersub(&tv_even, &tv_odd, &tv_delta);
@@ -953,6 +1036,7 @@ void turbostat_init()
check_super_user();
num_cpus = for_all_cpus(alloc_new_counters);
+ cpu_mask_init(num_cpus);
if (verbose)
print_nehalem_info();
@@ -1005,8 +1089,11 @@ void cmdline(int argc, char **argv)
progname = argv[0];
- while ((opt = getopt(argc, argv, "+vi:M:")) != -1) {
+ while ((opt = getopt(argc, argv, "+svi:M:")) != -1) {
switch (opt) {
+ case 's':
+ summary_only++;
+ break;
case 'v':
verbose++;
break;